import math
import numbers
import random
import warnings
from collections.abc import Sequence, Iterable
from typing import Tuple

import numpy as np
import torch
from PIL import Image
from torch import Tensor

try:
    import accimage
except ImportError:
    accimage = None

from . import functional as F

__all__ = ["Compose", "ToTensor", "PILToTensor", "ConvertImageDtype", "ToPILImage", "Normalize", "Resize", "Scale",
           "CenterCrop", "Pad", "Lambda", "RandomApply", "RandomChoice", "RandomOrder", "RandomCrop",
           "RandomHorizontalFlip", "RandomVerticalFlip", "RandomResizedCrop", "RandomSizedCrop", "FiveCrop", "TenCrop",
           "LinearTransformation", "ColorJitter", "RandomRotation", "RandomAffine", "Grayscale", "RandomGrayscale",
           "RandomPerspective", "RandomErasing"]

_pil_interpolation_to_str = {
    Image.NEAREST: 'PIL.Image.NEAREST',
    Image.BILINEAR: 'PIL.Image.BILINEAR',
    Image.BICUBIC: 'PIL.Image.BICUBIC',
    Image.LANCZOS: 'PIL.Image.LANCZOS',
    Image.HAMMING: 'PIL.Image.HAMMING',
    Image.BOX: 'PIL.Image.BOX',
}


class Compose(object):
    """Composes several transforms together.

    Args:
        transforms (list of ``Transform`` objects): list of transforms to compose.

    Example:
        >>> transforms.Compose([
        >>>     transforms.CenterCrop(10),
        >>>     transforms.ToTensor(),
        >>> ])
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img):
        for t in self.transforms:
            img = t(img)
        return img

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        for t in self.transforms:
            format_string += '\n'
            format_string += '    {0}'.format(t)
        format_string += '\n)'
        return format_string


class ToTensor(object):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.

    Converts a PIL Image or numpy.ndarray (H x W x C) in the range
    [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
    if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
    or if the numpy.ndarray has dtype = np.uint8.

    In the other cases, tensors are returned without scaling.
    """

    def __call__(self, pic):
        """
        Args:
            pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

        Returns:
            Tensor: Converted image.
        """
        return F.to_tensor(pic)

    def __repr__(self):
        return self.__class__.__name__ + '()'
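
# Usage sketch for ToTensor (illustrative, not part of the library):
#
#     arr = np.full((2, 2, 3), 255, dtype=np.uint8)   # HWC uint8 image
#     t = ToTensor()(arr)                             # CHW float tensor, scaled by 1/255
#     assert t.shape == (3, 2, 2) and t.max().item() == 1.0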


class PILToTensor(object):
    """Convert a ``PIL Image`` to a tensor of the same type.

    Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
    """

    def __call__(self, pic):
        """
        Args:
            pic (PIL Image): Image to be converted to tensor.

        Returns:
            Tensor: Converted image.
        """
        return F.pil_to_tensor(pic)

    def __repr__(self):
        return self.__class__.__name__ + '()'


class ConvertImageDtype(object):
    """Convert a tensor image to the given ``dtype`` and scale the values accordingly.

    Args:
        dtype (torch.dtype): Desired data type of the output

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
    """

    def __init__(self, dtype: torch.dtype) -> None:
        self.dtype = dtype

    def __call__(self, image: torch.Tensor) -> torch.Tensor:
        return F.convert_image_dtype(image, self.dtype)
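
# Usage sketch for ConvertImageDtype (illustrative values):
#
#     img = torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8)
#     as_float = ConvertImageDtype(torch.float32)(img)   # rescaled into [0.0, 1.0]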


class ToPILImage(object):
    """Convert a tensor or an ndarray to PIL Image.

    Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
    H x W x C to a PIL Image while preserving the value range.

    Args:
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
            If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
             - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
             - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
             - If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
             - If the input has 1 channel, the ``mode`` is determined by the data type (i.e. ``int``, ``float``,
               ``short``).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
    """
    def __init__(self, mode=None):
        self.mode = mode

    def __call__(self, pic):
        """
        Args:
            pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.

        Returns:
            PIL Image: Image converted to PIL Image.

        """
        return F.to_pil_image(pic, self.mode)

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        if self.mode is not None:
            format_string += 'mode={0}'.format(self.mode)
        format_string += ')'
        return format_string


class Normalize(object):
    """Normalize a tensor image with mean and standard deviation.
    Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n``
    channels, this transform will normalize each channel of the input
    ``torch.*Tensor`` i.e.,
    ``output[channel] = (input[channel] - mean[channel]) / std[channel]``

    .. note::
        This transform acts out of place, i.e., it does not mutate the input tensor.

    Args:
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): Bool to make this operation in-place.

    """

    def __init__(self, mean, std, inplace=False):
        self.mean = mean
        self.std = std
        self.inplace = inplace

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.

        Returns:
            Tensor: Normalized Tensor image.
        """
        return F.normalize(tensor, self.mean, self.std, self.inplace)

    def __repr__(self):
        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)
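
# Usage sketch for Normalize (the statistics below are illustrative, e.g. ImageNet's):
#
#     normalize = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#     out = normalize(torch.rand(3, 224, 224))   # per channel: (x - mean) / std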


class Resize(object):
    """Resize the input PIL Image to the given size.

    Args:
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), output size will be matched to this. If size is an int,
            smaller edge of the image will be matched to this number.
            i.e, if height > width, then image will be rescaled to
            (size * height / width, size)
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        assert isinstance(size, int) or (isinstance(size, Iterable) and len(size) == 2)
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be scaled.

        Returns:
            PIL Image: Rescaled image.
        """
        return F.resize(img, self.size, self.interpolation)

    def __repr__(self):
        interpolate_str = _pil_interpolation_to_str[self.interpolation]
        return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)
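
# Usage sketch for Resize (sizes are illustrative; PIL reports size as (width, height)):
#
#     img = Image.new('RGB', (400, 200))
#     assert Resize(100)(img).size == (200, 100)          # smaller edge matched to 100
#     assert Resize((100, 100))(img).size == (100, 100)   # exact (h, w) output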


class Scale(Resize):
    """
    Note: This transform is deprecated in favor of Resize.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn("The use of the transforms.Scale transform is deprecated, " +
                      "please use transforms.Resize instead.")
        super(Scale, self).__init__(*args, **kwargs)


class CenterCrop(torch.nn.Module):
    """Crops the given image at the center.
    The image can be a PIL Image or a torch Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
    """

    def __init__(self, size):
        super().__init__()
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        elif isinstance(size, Sequence) and len(size) == 1:
            self.size = (size[0], size[0])
        else:
            if len(size) != 2:
                raise ValueError("Please provide only two dimensions (h, w) for size.")

            self.size = size

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            PIL Image or Tensor: Cropped image.
        """
        return F.center_crop(img, self.size)

    def __repr__(self):
        return self.__class__.__name__ + '(size={0})'.format(self.size)


class Pad(torch.nn.Module):
    """Pad the given image on all sides with the given "pad" value.
    The image can be a PIL Image or a torch Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        padding (int or tuple or list): Padding on each border. If a single int is provided this
            is used to pad all borders. If tuple of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a tuple of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.
            In torchscript mode padding as single int is not supported, use a tuple or
            list of length 1: ``[padding, ]``.
        fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
            length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
            Default is constant. Only "constant" is supported for Tensors as of now.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value at the edge of the image

            - reflect: pads with reflection of image without repeating the last value on the edge

                For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
                will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image repeating the last value on the edge

                For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
                will result in [2, 1, 1, 2, 3, 4, 4, 3]
    """

    def __init__(self, padding, fill=0, padding_mode="constant"):
        super().__init__()
        if not isinstance(padding, (numbers.Number, tuple, list)):
            raise TypeError("Got inappropriate padding arg")

        if not isinstance(fill, (numbers.Number, str, tuple)):
            raise TypeError("Got inappropriate fill arg")

        if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
            raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")

        if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]:
            raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " +
                             "{} element tuple".format(len(padding)))

        self.padding = padding
        self.fill = fill
        self.padding_mode = padding_mode

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be padded.

        Returns:
            PIL Image or Tensor: Padded image.
        """
        return F.pad(img, self.padding, self.fill, self.padding_mode)

    def __repr__(self):
        return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\
            format(self.padding, self.fill, self.padding_mode)
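
# Padding-mode sketch on a 1-D row of pixels (values are illustrative):
#
#     row: [1, 2, 3, 4], padded with 2 elements on both sides
#     constant (fill=0): [0, 0, 1, 2, 3, 4, 0, 0]
#     edge:              [1, 1, 1, 2, 3, 4, 4, 4]
#     reflect:           [3, 2, 1, 2, 3, 4, 3, 2]
#     symmetric:         [2, 1, 1, 2, 3, 4, 4, 3]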


class Lambda(object):
    """Apply a user-defined lambda as a transform.

    Args:
        lambd (function): Lambda/function to be used for transform.
    """

    def __init__(self, lambd):
        assert callable(lambd), repr(type(lambd).__name__) + " object is not callable"
        self.lambd = lambd

    def __call__(self, img):
        return self.lambd(img)

    def __repr__(self):
        return self.__class__.__name__ + '()'


class RandomTransforms(object):
    """Base class for a list of transformations with randomness

    Args:
        transforms (list or tuple): list of transformations
    """

    def __init__(self, transforms):
        assert isinstance(transforms, (list, tuple))
        self.transforms = transforms

    def __call__(self, *args, **kwargs):
        raise NotImplementedError()

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        for t in self.transforms:
            format_string += '\n'
            format_string += '    {0}'.format(t)
        format_string += '\n)'
        return format_string


class RandomApply(RandomTransforms):
    """Apply randomly a list of transformations with a given probability

    Args:
        transforms (list or tuple): list of transformations
        p (float): probability
    """

    def __init__(self, transforms, p=0.5):
        super(RandomApply, self).__init__(transforms)
        self.p = p

    def __call__(self, img):
        if self.p < random.random():
            return img
        for t in self.transforms:
            img = t(img)
        return img

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        format_string += '\n    p={}'.format(self.p)
        for t in self.transforms:
            format_string += '\n'
            format_string += '    {0}'.format(t)
        format_string += '\n)'
        return format_string


class RandomOrder(RandomTransforms):
    """Apply a list of transformations in a random order
    """
    def __call__(self, img):
        order = list(range(len(self.transforms)))
        random.shuffle(order)
        for i in order:
            img = self.transforms[i](img)
        return img


class RandomChoice(RandomTransforms):
    """Apply single transformation randomly picked from a list
    """
    def __call__(self, img):
        t = random.choice(self.transforms)
        return t(img)
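
# Usage sketch for the Random* containers (arguments are illustrative):
#
#     t1 = RandomApply([ColorJitter(0.4, 0.4, 0.4)], p=0.8)               # whole list with probability 0.8
#     t2 = RandomChoice([RandomHorizontalFlip(), RandomVerticalFlip()])   # pick one at random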


class RandomCrop(torch.nn.Module):
    """Crop the given image at a random location.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
        padding (int or sequence, optional): Optional padding on each border
            of the image. Default is None. If a single int is provided this
            is used to pad all borders. If tuple of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a tuple of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.
            In torchscript mode padding as single int is not supported, use a tuple or
            list of length 1: ``[padding, ]``.
        pad_if_needed (boolean): It will pad the image if smaller than the
            desired size to avoid raising an exception. Since cropping is done
            after padding, the padding seems to be done at a random offset.
        fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
            length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.

             - constant: pads with a constant value, this value is specified with fill

             - edge: pads with the last value on the edge of the image

             - reflect: pads with reflection of image (without repeating the last value on the edge)

                padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
                will result in [3, 2, 1, 2, 3, 4, 3, 2]

             - symmetric: pads with reflection of image (repeating the last value on the edge)

                padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
                will result in [2, 1, 1, 2, 3, 4, 4, 3]

    """

    @staticmethod
    def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
        """Get parameters for ``crop`` for a random crop.

        Args:
            img (PIL Image or Tensor): Image to be cropped.
            output_size (tuple): Expected output size of the crop.

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
        """
        w, h = F._get_image_size(img)
        th, tw = output_size
        if w == tw and h == th:
            return 0, 0, h, w

        # high is exclusive, so +1 lets the crop start flush with the bottom/right edge
        i = torch.randint(0, h - th + 1, size=(1, )).item()
        j = torch.randint(0, w - tw + 1, size=(1, )).item()
        return i, j, th, tw

    def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"):
        super().__init__()
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        elif isinstance(size, Sequence) and len(size) == 1:
            self.size = (size[0], size[0])
        else:
            if len(size) != 2:
                raise ValueError("Please provide only two dimensions (h, w) for size.")

            # cast to tuple for torchscript
            self.size = tuple(size)
        self.padding = padding
        self.pad_if_needed = pad_if_needed
        self.fill = fill
        self.padding_mode = padding_mode

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            PIL Image or Tensor: Cropped image.
        """
        if self.padding is not None:
            img = F.pad(img, self.padding, self.fill, self.padding_mode)

        width, height = F._get_image_size(img)
        # pad the width if needed
        if self.pad_if_needed and width < self.size[1]:
            padding = [self.size[1] - width, 0]
            img = F.pad(img, padding, self.fill, self.padding_mode)
        # pad the height if needed
        if self.pad_if_needed and height < self.size[0]:
            padding = [0, self.size[0] - height]
            img = F.pad(img, padding, self.fill, self.padding_mode)

        i, j, h, w = self.get_params(img, self.size)

        return F.crop(img, i, j, h, w)

    def __repr__(self):
        return self.__class__.__name__ + "(size={0}, padding={1})".format(self.size, self.padding)
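
# Usage sketch for RandomCrop (illustrative arguments):
#
#     crop = RandomCrop(32, padding=4)    # pad 4 px on every side, then crop 32x32
#     out = crop(torch.rand(3, 32, 32))   # a random 32x32 window of the padded image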


class RandomHorizontalFlip(torch.nn.Module):
    """Horizontally flip the given image randomly with a given probability.
    The image can be a PIL Image or a torch Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    Args:
        p (float): probability of the image being flipped. Default value is 0.5
    """

    def __init__(self, p=0.5):
        super().__init__()
        self.p = p

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be flipped.

        Returns:
            PIL Image or Tensor: Randomly flipped image.
        """
        if torch.rand(1) < self.p:
            return F.hflip(img)
        return img

    def __repr__(self):
        return self.__class__.__name__ + '(p={})'.format(self.p)


class RandomVerticalFlip(torch.nn.Module):
    """Vertically flip the given image randomly with a given probability.
    The image can be a PIL Image or a torch Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    Args:
        p (float): probability of the image being flipped. Default value is 0.5
    """

    def __init__(self, p=0.5):
        super().__init__()
        self.p = p

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be flipped.

        Returns:
            PIL Image or Tensor: Randomly flipped image.
        """
        if torch.rand(1) < self.p:
            return F.vflip(img)
        return img

    def __repr__(self):
        return self.__class__.__name__ + '(p={})'.format(self.p)


class RandomPerspective(object):
    """Performs a random perspective transformation of the given PIL Image with a given probability.

    Args:
        interpolation : Default is Image.BICUBIC.

        p (float): probability of the image being perspectively transformed. Default value is 0.5

        distortion_scale (float): controls the degree of distortion, ranging from 0 to 1. Default value is 0.5.

        fill (3-tuple or int): RGB pixel fill value for the area outside the transformed image.
            If int, it is used for all channels. Default value is 0.
    """

    def __init__(self, distortion_scale=0.5, p=0.5, interpolation=Image.BICUBIC, fill=0):
        self.p = p
        self.interpolation = interpolation
        self.distortion_scale = distortion_scale
        self.fill = fill

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be perspectively transformed.

        Returns:
            PIL Image: Randomly perspective-transformed image.
        """
        if not F._is_pil_image(img):
            raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

        if random.random() < self.p:
            width, height = img.size
            startpoints, endpoints = self.get_params(width, height, self.distortion_scale)
            return F.perspective(img, startpoints, endpoints, self.interpolation, self.fill)
        return img

    @staticmethod
    def get_params(width, height, distortion_scale):
        """Get parameters for ``perspective`` for a random perspective transform.

        Args:
            width : width of the image.
            height : height of the image.
            distortion_scale (float): argument to control the degree of distortion.

        Returns:
            List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
            List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
        """
        half_height = int(height / 2)
        half_width = int(width / 2)
        topleft = (random.randint(0, int(distortion_scale * half_width)),
                   random.randint(0, int(distortion_scale * half_height)))
        topright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),
                    random.randint(0, int(distortion_scale * half_height)))
        botright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),
                    random.randint(height - int(distortion_scale * half_height) - 1, height - 1))
        botleft = (random.randint(0, int(distortion_scale * half_width)),
                   random.randint(height - int(distortion_scale * half_height) - 1, height - 1))
        startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)]
        endpoints = [topleft, topright, botright, botleft]
        return startpoints, endpoints

    def __repr__(self):
        return self.__class__.__name__ + '(p={})'.format(self.p)
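
# Usage sketch for RandomPerspective (illustrative arguments):
#
#     rp = RandomPerspective(distortion_scale=0.5, p=1.0)   # always transform
#     out = rp(Image.new('RGB', (64, 64)))                  # corners displaced at random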


class RandomResizedCrop(object):
    """Crop the given PIL Image to random size and aspect ratio.

    A crop of random size (default: 0.08 to 1.0 of the original size) and a random
    aspect ratio (default: 3/4 to 4/3 of the original aspect ratio) is made. This crop
    is finally resized to given size.
    This is popularly used to train the Inception networks.

    Args:
        size: expected output size of each edge
        scale: range of size of the origin size cropped
        ratio: range of aspect ratio of the origin aspect ratio cropped
        interpolation: Default: PIL.Image.BILINEAR
    """

    def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):
        if isinstance(size, (tuple, list)):
            self.size = size
        else:
            self.size = (size, size)
        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
            warnings.warn("range should be of kind (min, max)")

        self.interpolation = interpolation
        self.scale = scale
        self.ratio = ratio

    @staticmethod
    def get_params(img, scale, ratio):
        """Get parameters for ``crop`` for a random sized crop.

        Args:
            img (PIL Image): Image to be cropped.
            scale (tuple): range of size of the origin size cropped
            ratio (tuple): range of aspect ratio of the origin aspect ratio cropped

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
                sized crop.
        """
        width, height = F._get_image_size(img)
        area = height * width

        for _ in range(10):
            target_area = random.uniform(*scale) * area
            log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
            aspect_ratio = math.exp(random.uniform(*log_ratio))

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if 0 < w <= width and 0 < h <= height:
                i = random.randint(0, height - h)
                j = random.randint(0, width - w)
                return i, j, h, w

        # Fallback to central crop
        in_ratio = float(width) / float(height)
        if (in_ratio < min(ratio)):
            w = width
            h = int(round(w / min(ratio)))
        elif (in_ratio > max(ratio)):
            h = height
            w = int(round(h * max(ratio)))
        else:  # whole image
            w = width
            h = height
        i = (height - h) // 2
        j = (width - w) // 2
        return i, j, h, w

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be cropped and resized.

        Returns:
            PIL Image: Randomly cropped and resized image.
        """
        i, j, h, w = self.get_params(img, self.scale, self.ratio)
        return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)

    def __repr__(self):
        interpolate_str = _pil_interpolation_to_str[self.interpolation]
        format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
        format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
        format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
        format_string += ', interpolation={0})'.format(interpolate_str)
        return format_string
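
# Usage sketch for RandomResizedCrop (illustrative arguments):
#
#     rrc = RandomResizedCrop(224, scale=(0.5, 1.0))   # crop 50-100% of the original area
#     out = rrc(Image.new('RGB', (640, 480)))          # -> 224x224 PIL Image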


class RandomSizedCrop(RandomResizedCrop):
    """
    Note: This transform is deprecated in favor of RandomResizedCrop.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn("The use of the transforms.RandomSizedCrop transform is deprecated, " +
                      "please use transforms.RandomResizedCrop instead.")
        super(RandomSizedCrop, self).__init__(*args, **kwargs)


class FiveCrop(torch.nn.Module):
    """Crop the given image into four corners and the central crop.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    .. Note::
         This transform returns a tuple of images and there may be a mismatch in the number of
         inputs and targets your Dataset returns. See below for an example of how to deal with
         this.

    Args:
         size (sequence or int): Desired output size of the crop. If size is an ``int``
            instead of sequence like (h, w), a square crop of size (size, size) is made.
            If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).

    Example:
         >>> transform = Compose([
         >>>    FiveCrop(size), # this is a list of PIL Images
         >>>    Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
         >>> ])
         >>> #In your test loop you can do the following:
         >>> input, target = batch # input is a 5d tensor, target is 2d
         >>> bs, ncrops, c, h, w = input.size()
         >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
         >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
    """

    def __init__(self, size):
        super().__init__()
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        elif isinstance(size, Sequence) and len(size) == 1:
            self.size = (size[0], size[0])
        else:
            if len(size) != 2:
                raise ValueError("Please provide only two dimensions (h, w) for size.")

            self.size = size

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            tuple of 5 images. Image can be PIL Image or Tensor
        """
        return F.five_crop(img, self.size)

    def __repr__(self):
        return self.__class__.__name__ + '(size={0})'.format(self.size)


class TenCrop(torch.nn.Module):
    """Crop the given image into four corners and the central crop plus the flipped version of
    these (horizontal flipping is used by default).
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    .. Note::
         This transform returns a tuple of images and there may be a mismatch in the number of
         inputs and targets your Dataset returns. See below for an example of how to deal with
         this.

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal.

    Example:
         >>> transform = Compose([
         >>>    TenCrop(size), # this is a list of PIL Images
         >>>    Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
         >>> ])
         >>> #In your test loop you can do the following:
         >>> input, target = batch # input is a 5d tensor, target is 2d
         >>> bs, ncrops, c, h, w = input.size()
         >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
         >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
    """

    def __init__(self, size, vertical_flip=False):
        super().__init__()
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        elif isinstance(size, Sequence) and len(size) == 1:
            self.size = (size[0], size[0])
        else:
            if len(size) != 2:
                raise ValueError("Please provide only two dimensions (h, w) for size.")

            self.size = size
        self.vertical_flip = vertical_flip

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            tuple of 10 images. Image can be PIL Image or Tensor
        """
        return F.ten_crop(img, self.size, self.vertical_flip)

    def __repr__(self):
        return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)


class LinearTransformation(object):
    """Transform a tensor image with a square transformation matrix and a mean_vector computed
    offline.
    Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and
    subtract mean_vector from it which is then followed by computing the dot
    product with the transformation matrix and then reshaping the tensor to its
    original shape.

    Applications:
        whitening transformation: Suppose X is a matrix of zero-centered data samples.
        Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),
        perform SVD on this matrix and pass it as transformation_matrix.

    Args:
        transformation_matrix (Tensor): tensor [D x D], D = C x H x W
        mean_vector (Tensor): tensor [D], D = C x H x W
    """

    def __init__(self, transformation_matrix, mean_vector):
        if transformation_matrix.size(0) != transformation_matrix.size(1):
            raise ValueError("transformation_matrix should be square. Got " +
                             "[{} x {}] rectangular matrix.".format(*transformation_matrix.size()))

        if mean_vector.size(0) != transformation_matrix.size(0):
            raise ValueError("mean_vector should have the same length {}".format(mean_vector.size(0)) +
                             " as any one of the dimensions of the transformation_matrix [{}]"
                             .format(tuple(transformation_matrix.size())))

        self.transformation_matrix = transformation_matrix
        self.mean_vector = mean_vector

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be whitened.

        Returns:
            Tensor: Transformed image.
        """
        if tensor.size(0) * tensor.size(1) * tensor.size(2) != self.transformation_matrix.size(0):
            raise ValueError("tensor and transformation matrix have incompatible shape." +
                             "[{} x {} x {}] != ".format(*tensor.size()) +
                             "{}".format(self.transformation_matrix.size(0)))
        flat_tensor = tensor.view(1, -1) - self.mean_vector
        transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
        tensor = transformed_tensor.view(tensor.size())
        return tensor

    def __repr__(self):
        format_string = self.__class__.__name__ + '(transformation_matrix='
        format_string += (str(self.transformation_matrix.tolist()) + ')')
        format_string += (", (mean_vector=" + str(self.mean_vector.tolist()) + ')')
        return format_string
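
# Whitening sketch (illustrative; assumes X is an [N, D] tensor of flattened training images):
#
#     mean = X.mean(dim=0)
#     X0 = X - mean                                    # zero-center the data
#     cov = torch.mm(X0.t(), X0) / X0.size(0)          # [D, D] covariance estimate
#     U, S, _ = torch.svd(cov)
#     W = U @ torch.diag((S + 1e-5).rsqrt()) @ U.t()   # ZCA whitening matrix
#     transform = LinearTransformation(W, mean)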


class ColorJitter(torch.nn.Module):
    """Randomly change the brightness, contrast and saturation of an image.

    Args:
        brightness (float or tuple of float (min, max)): How much to jitter brightness.
            brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
            or the given [min, max]. Should be non negative numbers.
        contrast (float or tuple of float (min, max)): How much to jitter contrast.
            contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
            or the given [min, max]. Should be non negative numbers.
        saturation (float or tuple of float (min, max)): How much to jitter saturation.
            saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
            or the given [min, max]. Should be non negative numbers.
        hue (float or tuple of float (min, max)): How much to jitter hue.
            hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
            Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
    """

    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        super().__init__()
        self.brightness = self._check_input(brightness, 'brightness')
        self.contrast = self._check_input(contrast, 'contrast')
        self.saturation = self._check_input(saturation, 'saturation')
        self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
                                     clip_first_on_zero=False)

    @torch.jit.unused
    def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
        if isinstance(value, numbers.Number):
            if value < 0:
                raise ValueError("If {} is a single number, it must be non negative.".format(name))
            value = [center - float(value), center + float(value)]
            if clip_first_on_zero:
                value[0] = max(value[0], 0.0)
        elif isinstance(value, (tuple, list)) and len(value) == 2:
            if not bound[0] <= value[0] <= value[1] <= bound[1]:
                raise ValueError("{} values should be between {}".format(name, bound))
        else:
            raise TypeError("{} should be a single number or a list/tuple with length 2.".format(name))

        # if value is 0 or (1., 1.) for brightness/contrast/saturation
        # or (0., 0.) for hue, do nothing
        if value[0] == value[1] == center:
            value = None
        return value

    @staticmethod
    @torch.jit.unused
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.

        Arguments are same as that of __init__.

        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        transforms = []

        if brightness is not None:
            brightness_factor = random.uniform(brightness[0], brightness[1])
            transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))

        if contrast is not None:
            contrast_factor = random.uniform(contrast[0], contrast[1])
            transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))

        if saturation is not None:
            saturation_factor = random.uniform(saturation[0], saturation[1])
            transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))

        if hue is not None:
            hue_factor = random.uniform(hue[0], hue[1])
            transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))

        random.shuffle(transforms)
        transform = Compose(transforms)

        return transform

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Input image.

        Returns:
            PIL Image or Tensor: Color jittered image.
        """
        fn_idx = torch.randperm(4)
        for fn_id in fn_idx:
            if fn_id == 0 and self.brightness is not None:
                brightness = self.brightness
                brightness_factor = torch.tensor(1.0).uniform_(brightness[0], brightness[1]).item()
                img = F.adjust_brightness(img, brightness_factor)

            if fn_id == 1 and self.contrast is not None:
                contrast = self.contrast
                contrast_factor = torch.tensor(1.0).uniform_(contrast[0], contrast[1]).item()
                img = F.adjust_contrast(img, contrast_factor)

            if fn_id == 2 and self.saturation is not None:
                saturation = self.saturation
                saturation_factor = torch.tensor(1.0).uniform_(saturation[0], saturation[1]).item()
                img = F.adjust_saturation(img, saturation_factor)

            if fn_id == 3 and self.hue is not None:
                hue = self.hue
                hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item()
                img = F.adjust_hue(img, hue_factor)

        return img

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        format_string += 'brightness={0}'.format(self.brightness)
        format_string += ', contrast={0}'.format(self.contrast)
        format_string += ', saturation={0}'.format(self.saturation)
        format_string += ', hue={0})'.format(self.hue)
        return format_string
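
# Usage sketch for ColorJitter (illustrative ranges):
#
#     jitter = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
#     out = jitter(img)   # e.g. brightness factor drawn uniformly from [0.6, 1.4]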


class RandomRotation(object):
    """Rotate the image by angle.

    Args:
        degrees (sequence or float or int): Range of degrees to select from.
            If degrees is a number instead of sequence like (min, max), the range of degrees
            will be (-degrees, +degrees).
        resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
            An optional resampling filter. See `filters`_ for more information.
            If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (2-tuple, optional): Optional center of rotation.
            Origin is the upper left corner.
            Default is the center of the image.
        fill (n-tuple or int or float): Pixel fill value for area outside the rotated
            image. If int or float, the value is used for all bands. Defaults to 0
            for all bands. This option is only available for ``pillow>=5.2.0``.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """

    def __init__(self, degrees, resample=False, expand=False, center=None, fill=None):
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError("If degrees is a single number, it must be positive.")
            self.degrees = (-degrees, degrees)
        else:
            if len(degrees) != 2:
                raise ValueError("If degrees is a sequence, it must be of len 2.")
            self.degrees = degrees

        self.resample = resample
        self.expand = expand
        self.center = center
        self.fill = fill

    @staticmethod
    def get_params(degrees):
        """Get parameters for ``rotate`` for a random rotation.

        Returns:
            sequence: params to be passed to ``rotate`` for random rotation.
        """
        angle = random.uniform(degrees[0], degrees[1])

        return angle

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be rotated.

        Returns:
            PIL Image: Rotated image.
        """

        angle = self.get_params(self.degrees)

        return F.rotate(img, angle, self.resample, self.expand, self.center, self.fill)

    def __repr__(self):
        format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
        format_string += ', resample={0}'.format(self.resample)
        format_string += ', expand={0}'.format(self.expand)
        if self.center is not None:
            format_string += ', center={0}'.format(self.center)
        if self.fill is not None:
            format_string += ', fill={0}'.format(self.fill)
        format_string += ')'
        return format_string
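
# Usage sketch for RandomRotation (illustrative arguments):
#
#     rot = RandomRotation(30, expand=True)   # angle drawn uniformly from [-30, 30]
#     out = rot(img)                          # canvas grows to hold the rotated image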


class RandomAffine(object):
    """Random affine transformation of the image keeping center invariant

    Args:
        degrees (sequence or float or int): Range of degrees to select from.
            If degrees is a number instead of sequence like (min, max), the range of degrees
            will be (-degrees, +degrees). Set to 0 to deactivate rotations.
        translate (tuple, optional): tuple of maximum absolute fraction for horizontal
            and vertical translations. For example translate=(a, b), then horizontal shift
            is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
            randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
        scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is
            randomly sampled from the range a <= scale <= b. Will keep original scale by default.
        shear (sequence or float or int, optional): Range of degrees to select from.
            If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)
            will be applied. Else if shear is a tuple or list of 2 values a shear parallel to the x axis in the
            range (shear[0], shear[1]) will be applied. Else if shear is a tuple or list of 4 values,
            an x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.
            Will not apply shear by default.
        resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
            An optional resampling filter. See `filters`_ for more information.
            If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
        fillcolor (tuple or int): Optional fill color (tuple for RGB images, int for grayscale) for the area
            outside the transform in the output image. (Pillow>=5.0.0)

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

    """

    def __init__(self, degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0):
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError("If degrees is a single number, it must be positive.")
            self.degrees = (-degrees, degrees)
        else:
            assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \
                "degrees should be a list or tuple and it must be of length 2."
            self.degrees = degrees

        if translate is not None:
            assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
                "translate should be a list or tuple and it must be of length 2."
            for t in translate:
                if not (0.0 <= t <= 1.0):
                    raise ValueError("translation values should be between 0 and 1")
        self.translate = translate

        if scale is not None:
            assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
                "scale should be a list or tuple and it must be of length 2."
            for s in scale:
                if s <= 0:
                    raise ValueError("scale values should be positive")
        self.scale = scale

        if shear is not None:
            if isinstance(shear, numbers.Number):
                if shear < 0:
                    raise ValueError("If shear is a single number, it must be positive.")
                self.shear = (-shear, shear)
            else:
                assert isinstance(shear, (tuple, list)) and \
                    (len(shear) == 2 or len(shear) == 4), \
                    "shear should be a list or tuple and it must be of length 2 or 4."
                # X-Axis shear with [min, max]
                if len(shear) == 2:
                    self.shear = [shear[0], shear[1], 0., 0.]
                elif len(shear) == 4:
                    self.shear = [s for s in shear]
        else:
            self.shear = shear

        self.resample = resample
        self.fillcolor = fillcolor

    @staticmethod
    def get_params(degrees, translate, scale_ranges, shears, img_size):
        """Get parameters for affine transformation

        Returns:
            sequence: params to be passed to the affine transformation
        """
        angle = random.uniform(degrees[0], degrees[1])
        if translate is not None:
            max_dx = translate[0] * img_size[0]
            max_dy = translate[1] * img_size[1]
            translations = (np.round(random.uniform(-max_dx, max_dx)),
                            np.round(random.uniform(-max_dy, max_dy)))
        else:
            translations = (0, 0)

        if scale_ranges is not None:
            scale = random.uniform(scale_ranges[0], scale_ranges[1])
        else:
            scale = 1.0

        if shears is not None:
            if len(shears) == 2:
                shear = [random.uniform(shears[0], shears[1]), 0.]
            elif len(shears) == 4:
                shear = [random.uniform(shears[0], shears[1]),
                         random.uniform(shears[2], shears[3])]
        else:
            shear = 0.0

        return angle, translations, scale, shear

    def __call__(self, img):
        """
            img (PIL Image): Image to be transformed.

        Returns:
            PIL Image: Affine transformed image.
        """
        ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size)
        return F.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor)

    def __repr__(self):
        s = '{name}(degrees={degrees}'
        if self.translate is not None:
            s += ', translate={translate}'
        if self.scale is not None:
            s += ', scale={scale}'
        if self.shear is not None:
            s += ', shear={shear}'
        if self.resample > 0:
            s += ', resample={resample}'
        if self.fillcolor != 0:
            s += ', fillcolor={fillcolor}'
        s += ')'
        d = dict(self.__dict__)
        d['resample'] = _pil_interpolation_to_str[d['resample']]
        return s.format(name=self.__class__.__name__, **d)


class Grayscale(object):
    """Convert image to grayscale.
    Args:
        num_output_channels (int): (1 or 3) number of channels desired for output image

    Returns:
        PIL Image: Grayscale version of the input.
         - If ``num_output_channels == 1`` : returned image is single channel
         - If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b
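
    Example (illustrative; assumes a PIL Image ``img`` and that this module is
    imported as ``transforms``):
        >>> # num_output_channels=3 keeps an RGB layout with r == g == b.
        >>> gray = transforms.Grayscale(num_output_channels=3)(img)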

    """

    def __init__(self, num_output_channels=1):
        self.num_output_channels = num_output_channels

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be converted to grayscale.

        Returns:
            PIL Image: Grayscaled image.
        """
        return F.to_grayscale(img, num_output_channels=self.num_output_channels)

    def __repr__(self):
        return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)


class RandomGrayscale(object):
    """Randomly convert image to grayscale with a probability of p (default 0.1).
    Args:
        p (float): probability that image should be converted to grayscale.

    Returns:
        PIL Image: Grayscale version of the input image with probability p and unchanged
        with probability (1-p).
        - If input image is 1 channel: grayscale version is 1 channel
        - If input image is 3 channel: grayscale version is 3 channel with r == g == b
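
    Example (illustrative; assumes a PIL Image ``img`` and that this module is
    imported as ``transforms``):
        >>> # Convert to grayscale 10% of the time (the default probability).
        >>> maybe_gray = transforms.RandomGrayscale(p=0.1)(img)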

    """

    def __init__(self, p=0.1):
        self.p = p

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be converted to grayscale.

        Returns:
            PIL Image: Randomly grayscaled image.
        """
        num_output_channels = 1 if img.mode == 'L' else 3
        if random.random() < self.p:
            return F.to_grayscale(img, num_output_channels=num_output_channels)
        return img

    def __repr__(self):
        return self.__class__.__name__ + '(p={0})'.format(self.p)


class RandomErasing(object):
    """ Randomly selects a rectangle region in an image and erases its pixels.
1347
1348
    'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/pdf/1708.04896.pdf

    Args:
         p: probability that the random erasing operation will be performed.
         scale: range of proportion of erased area against input image.
         ratio: range of aspect ratio of erased area.
         value: erasing value. Default is 0. If a single int, it is used to
            erase all pixels. If a tuple of length 3, it is used to erase
            R, G, B channels respectively.
            If the str 'random', each pixel is erased with a random value.
         inplace: boolean to make this transform inplace. Default set to False.

    Returns:
        Erased Image.

    Example:
        >>> transform = transforms.Compose([
        >>>   transforms.RandomHorizontalFlip(),
        >>>   transforms.ToTensor(),
        >>>   transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        >>>   transforms.RandomErasing(),
        >>> ])
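
    A second, illustrative example (assumes a (C, H, W) tensor ``img``):
        >>> # value='random' fills the erased region with per-pixel Gaussian noise.
        >>> eraser = transforms.RandomErasing(p=1.0, value='random')
        >>> erased = eraser(img)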
    """

    def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):
        assert isinstance(value, (numbers.Number, str, tuple, list))
        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
            warnings.warn("range should be of kind (min, max)")
        if scale[0] < 0 or scale[1] > 1:
            raise ValueError("range of scale should be between 0 and 1")
        if p < 0 or p > 1:
            raise ValueError("range of random erasing probability should be between 0 and 1")

        self.p = p
        self.scale = scale
        self.ratio = ratio
        self.value = value
        self.inplace = inplace

    @staticmethod
    def get_params(img, scale, ratio, value=0):
        """Get parameters for ``erase`` for a random erasing.

        Args:
            img (Tensor): Tensor image of size (C, H, W) to be erased.
            scale: range of proportion of erased area against input image.
            ratio: range of aspect ratio of erased area.
            value: erasing value (number, sequence of channel values, or the str 'random').

        Returns:
            tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.
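
        Example (illustrative; assumes a float tensor ``img`` of shape (3, 224, 224)):
            >>> i, j, h, w, v = RandomErasing.get_params(
            >>>     img, scale=(0.02, 0.33), ratio=(0.3, 3.3))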
        """
        img_c, img_h, img_w = img.shape
        area = img_h * img_w

        for _ in range(10):
            erase_area = random.uniform(scale[0], scale[1]) * area
            aspect_ratio = random.uniform(ratio[0], ratio[1])

            h = int(round(math.sqrt(erase_area * aspect_ratio)))
            w = int(round(math.sqrt(erase_area / aspect_ratio)))

            if h < img_h and w < img_w:
                i = random.randint(0, img_h - h)
                j = random.randint(0, img_w - w)
                if isinstance(value, numbers.Number):
                    v = value
                elif isinstance(value, str):
                    # value == 'random': fill with per-pixel standard-normal noise.
                    v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()
                elif isinstance(value, (list, tuple)):
                    v = torch.tensor(value, dtype=torch.float32).view(-1, 1, 1).expand(-1, h, w)
                return i, j, h, w, v

        # Could not sample a valid region in 10 tries: return params that
        # leave the image unchanged.
        return 0, 0, img_h, img_w, img

    def __call__(self, img):
        """
        Args:
            img (Tensor): Tensor image of size (C, H, W) to be erased.

        Returns:
            img (Tensor): Erased Tensor image.
        """
        if random.uniform(0, 1) < self.p:
            x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=self.value)
            return F.erase(img, x, y, h, w, v, self.inplace)
        return img