import math
import numbers
import random
import warnings
from collections.abc import Sequence
from typing import Tuple, List, Optional

import numpy as np
import torch
from PIL import Image
from torch import Tensor

try:
    import accimage
except ImportError:
    accimage = None

from . import functional as F

__all__ = ["Compose", "ToTensor", "PILToTensor", "ConvertImageDtype", "ToPILImage", "Normalize", "Resize", "Scale",
           "CenterCrop", "Pad", "Lambda", "RandomApply", "RandomChoice", "RandomOrder", "RandomCrop",
           "RandomHorizontalFlip", "RandomVerticalFlip", "RandomResizedCrop", "RandomSizedCrop", "FiveCrop", "TenCrop",
           "LinearTransformation", "ColorJitter", "RandomRotation", "RandomAffine", "Grayscale", "RandomGrayscale",
           "RandomPerspective", "RandomErasing"]


_pil_interpolation_to_str = {
    Image.NEAREST: 'PIL.Image.NEAREST',
    Image.BILINEAR: 'PIL.Image.BILINEAR',
    Image.BICUBIC: 'PIL.Image.BICUBIC',
    Image.LANCZOS: 'PIL.Image.LANCZOS',
    Image.HAMMING: 'PIL.Image.HAMMING',
    Image.BOX: 'PIL.Image.BOX',
}


class Compose(object):
    """Composes several transforms together.

    Args:
        transforms (list of ``Transform`` objects): list of transforms to compose.

    Example:
        >>> transforms.Compose([
        >>>     transforms.CenterCrop(10),
        >>>     transforms.ToTensor(),
        >>> ])
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img):
        for t in self.transforms:
            img = t(img)
        return img

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        for t in self.transforms:
            format_string += '\n'
            format_string += '    {0}'.format(t)
        format_string += '\n)'
        return format_string


class ToTensor(object):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.

    Converts a PIL Image or numpy.ndarray (H x W x C) in the range
    [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
    if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1)
    or if the numpy.ndarray has dtype = np.uint8

    In the other cases, tensors are returned without scaling.
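
    Example (a minimal sketch; ``img`` is assumed to be an 8-bit RGB PIL Image):
        >>> tensor = ToTensor()(img)  # shape (3, H, W), dtype torch.float32, values in [0.0, 1.0]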
    """

    def __call__(self, pic):
        """
        Args:
            pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

        Returns:
            Tensor: Converted image.
        """
        return F.to_tensor(pic)

    def __repr__(self):
        return self.__class__.__name__ + '()'

class PILToTensor(object):
    """Convert a ``PIL Image`` to a tensor of the same type.

    Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W).
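
    Example (a minimal sketch; ``img`` is assumed to be an 8-bit RGB PIL Image):
        >>> t = PILToTensor()(img)  # shape (3, H, W), dtype torch.uint8, values left unscaled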
    """

    def __call__(self, pic):
        """
        Args:
            pic (PIL Image): Image to be converted to tensor.

        Returns:
            Tensor: Converted image.
        """
        return F.pil_to_tensor(pic)

    def __repr__(self):
        return self.__class__.__name__ + '()'


class ConvertImageDtype(object):
    """Convert a tensor image to the given ``dtype`` and scale the values accordingly

    Args:
        dtype (torch.dtype): Desired data type of the output

    .. note::

        When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly.
        If converted back and forth, this mismatch has no effect.

    Raises:
        RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as
            well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to
            overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range
            of the integer ``dtype``.
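
    Example (a minimal sketch):
        >>> to_float = ConvertImageDtype(torch.float32)
        >>> out = to_float(torch.randint(0, 256, (3, 4, 4), dtype=torch.uint8))  # scaled to [0.0, 1.0]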
    """

    def __init__(self, dtype: torch.dtype) -> None:
        self.dtype = dtype

    def __call__(self, image: torch.Tensor) -> torch.Tensor:
        return F.convert_image_dtype(image, self.dtype)


class ToPILImage(object):
    """Convert a tensor or an ndarray to PIL Image.

    Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
    H x W x C to a PIL Image while preserving the value range.

    Args:
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
            If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
             - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
             - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
             - If the input has 2 channels, the ``mode`` is assumed to be ``LA``.
             - If the input has 1 channel, the ``mode`` is determined by the data type (i.e., ``int``, ``float``,
               ``short``).

    .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes
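
    Example (a minimal sketch):
        >>> pil_img = ToPILImage()(torch.rand(3, 16, 16))  # mode inferred as RGB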
    """
    def __init__(self, mode=None):
        self.mode = mode

    def __call__(self, pic):
        """
        Args:
            pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.

        Returns:
            PIL Image: Image converted to PIL Image.

        """
        return F.to_pil_image(pic, self.mode)

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        if self.mode is not None:
            format_string += 'mode={0}'.format(self.mode)
        format_string += ')'
        return format_string


class Normalize(object):
    """Normalize a tensor image with mean and standard deviation.
    Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],...,std[n])`` for ``n``
    channels, this transform will normalize each channel of the input
    ``torch.*Tensor`` i.e.,
    ``output[channel] = (input[channel] - mean[channel]) / std[channel]``

    .. note::
        This transform acts out of place, i.e., it does not mutate the input tensor.

    Args:
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
        inplace (bool, optional): whether to make this operation in-place.
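
    Example (a sketch using commonly published ImageNet statistics):
        >>> norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        >>> out = norm(torch.rand(3, 224, 224))  # each channel shifted and scaled, out of place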
    """

    def __init__(self, mean, std, inplace=False):
        self.mean = mean
        self.std = std
        self.inplace = inplace

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.

        Returns:
            Tensor: Normalized Tensor image.
        """
        return F.normalize(tensor, self.mean, self.std, self.inplace)

    def __repr__(self):
        return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)

class Resize(torch.nn.Module):
    """Resize the input image to the given size.
    The image can be a PIL Image or a torch Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), output size will be matched to this. If size is an int,
            smaller edge of the image will be matched to this number.
            i.e., if height > width, then image will be rescaled to
            (size * height / width, size).
            In torchscript mode size as a single int is not supported, use a tuple or
            list of length 1: ``[size, ]``.
        interpolation (int, optional): Desired interpolation. Default is ``PIL.Image.BILINEAR``.
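
    Example (a minimal sketch; ``img`` may be a PIL Image or a Tensor):
        >>> smaller_edge = Resize(256)(img)    # smaller edge becomes 256, aspect ratio kept
        >>> exact = Resize((224, 224))(img)    # output is exactly 224 x 224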
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        super().__init__()
        if not isinstance(size, (int, Sequence)):
            raise TypeError("Size should be int or sequence. Got {}".format(type(size)))
        if isinstance(size, Sequence) and len(size) not in (1, 2):
            raise ValueError("If size is a sequence, it should have 1 or 2 values")
        self.size = size
        self.interpolation = interpolation

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be scaled.

        Returns:
            PIL Image or Tensor: Rescaled image.
        """
        return F.resize(img, self.size, self.interpolation)

    def __repr__(self):
        interpolate_str = _pil_interpolation_to_str[self.interpolation]
        return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)


class Scale(Resize):
    """
    Note: This transform is deprecated in favor of Resize.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn("The use of the transforms.Scale transform is deprecated, " +
                      "please use transforms.Resize instead.")
        super(Scale, self).__init__(*args, **kwargs)


class CenterCrop(torch.nn.Module):
    """Crops the given image at the center.
    The image can be a PIL Image or a torch Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
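
    Example (a minimal sketch; ``img`` is assumed to be at least 224 x 224):
        >>> out = CenterCrop(224)(img)  # crops the central 224 x 224 region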
    """

    def __init__(self, size):
        super().__init__()
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        elif isinstance(size, Sequence) and len(size) == 1:
            self.size = (size[0], size[0])
        else:
            if len(size) != 2:
                raise ValueError("Please provide only two dimensions (h, w) for size.")

            self.size = size

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            PIL Image or Tensor: Cropped image.
        """
        return F.center_crop(img, self.size)

    def __repr__(self):
        return self.__class__.__name__ + '(size={0})'.format(self.size)

class Pad(torch.nn.Module):
    """Pad the given image on all sides with the given "pad" value.
    The image can be a PIL Image or a torch Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions

    Args:
        padding (int or tuple or list): Padding on each border. If a single int is provided this
            is used to pad all borders. If tuple of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a tuple of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.
            In torchscript mode padding as a single int is not supported, use a tuple or
            list of length 1: ``[padding, ]``.
        fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
            length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric.
            Default is constant. Mode symmetric is not yet supported for Tensor inputs.

            - constant: pads with a constant value, this value is specified with fill

            - edge: pads with the last value at the edge of the image

            - reflect: pads with reflection of image without repeating the last value on the edge

                For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
                will result in [3, 2, 1, 2, 3, 4, 3, 2]

            - symmetric: pads with reflection of image repeating the last value on the edge

                For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
                will result in [2, 1, 1, 2, 3, 4, 4, 3]
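
    Example (a sketch; ``img`` is assumed to be a PIL Image or Tensor):
        >>> Pad(2)(img)                                      # 2 pixels on every border
        >>> Pad((1, 2))(img)                                 # 1 on left/right, 2 on top/bottom
        >>> Pad((1, 2, 3, 4), padding_mode='reflect')(img)   # left, top, right, bottom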
    """

    def __init__(self, padding, fill=0, padding_mode="constant"):
        super().__init__()
        if not isinstance(padding, (numbers.Number, tuple, list)):
            raise TypeError("Got inappropriate padding arg")

        if not isinstance(fill, (numbers.Number, str, tuple)):
            raise TypeError("Got inappropriate fill arg")

        if padding_mode not in ["constant", "edge", "reflect", "symmetric"]:
            raise ValueError("Padding mode should be either constant, edge, reflect or symmetric")

        if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]:
            raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " +
                             "{} element tuple".format(len(padding)))

        self.padding = padding
        self.fill = fill
        self.padding_mode = padding_mode

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be padded.

        Returns:
            PIL Image or Tensor: Padded image.
        """
        return F.pad(img, self.padding, self.fill, self.padding_mode)
    def __repr__(self):
        return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\
            format(self.padding, self.fill, self.padding_mode)

class Lambda(object):
    """Apply a user-defined lambda as a transform.

    Args:
        lambd (function): Lambda/function to be used for transform.
    """

    def __init__(self, lambd):
        assert callable(lambd), repr(type(lambd).__name__) + " object is not callable"
        self.lambd = lambd

    def __call__(self, img):
        return self.lambd(img)

    def __repr__(self):
        return self.__class__.__name__ + '()'

class RandomTransforms(object):
    """Base class for a list of transformations with randomness

    Args:
        transforms (list or tuple): list of transformations
    """

    def __init__(self, transforms):
        assert isinstance(transforms, (list, tuple))
        self.transforms = transforms

    def __call__(self, *args, **kwargs):
        raise NotImplementedError()

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        for t in self.transforms:
            format_string += '\n'
            format_string += '    {0}'.format(t)
        format_string += '\n)'
        return format_string


class RandomApply(RandomTransforms):
    """Apply randomly a list of transformations with a given probability

    Args:
        transforms (list or tuple): list of transformations
        p (float): probability
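
    Example (a minimal sketch):
        >>> maybe_jitter = RandomApply([ColorJitter(brightness=0.2)], p=0.3)
        >>> out = maybe_jitter(img)  # img is assumed to be a PIL Image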
    """

    def __init__(self, transforms, p=0.5):
        super(RandomApply, self).__init__(transforms)
        self.p = p

    def __call__(self, img):
        if self.p < random.random():
            return img
        for t in self.transforms:
            img = t(img)
        return img

    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        format_string += '\n    p={}'.format(self.p)
        for t in self.transforms:
            format_string += '\n'
            format_string += '    {0}'.format(t)
        format_string += '\n)'
        return format_string


class RandomOrder(RandomTransforms):
    """Apply a list of transformations in a random order
    """
    def __call__(self, img):
        order = list(range(len(self.transforms)))
        random.shuffle(order)
        for i in order:
            img = self.transforms[i](img)
        return img


class RandomChoice(RandomTransforms):
    """Apply single transformation randomly picked from a list
    """
    def __call__(self, img):
        t = random.choice(self.transforms)
        return t(img)


class RandomCrop(torch.nn.Module):
    """Crop the given image at a random location.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
        padding (int or sequence, optional): Optional padding on each border
            of the image. Default is None. If a single int is provided this
            is used to pad all borders. If tuple of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a tuple of length 4 is provided
            this is the padding for the left, top, right and bottom borders respectively.
            In torchscript mode padding as single int is not supported, use a tuple or
            list of length 1: ``[padding, ]``.
        pad_if_needed (boolean): It will pad the image if smaller than the
            desired size to avoid raising an exception. Since cropping is done
            after padding, the padding seems to be done at a random offset.
        fill (int or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of
            length 3, it is used to fill R, G, B channels respectively.
            This value is only used when the padding_mode is constant.
        padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. Default is constant.
            Mode symmetric is not yet supported for Tensor inputs.

             - constant: pads with a constant value, this value is specified with fill

             - edge: pads with the last value on the edge of the image

             - reflect: pads with reflection of image (without repeating the last value on the edge)

                padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode
                will result in [3, 2, 1, 2, 3, 4, 3, 2]

             - symmetric: pads with reflection of image (repeating the last value on the edge)

                padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode
                will result in [2, 1, 1, 2, 3, 4, 4, 3]

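    Example (a sketch; pads a too-small input before cropping):
        >>> cropper = RandomCrop(32, padding=4, padding_mode='reflect')
        >>> patch = cropper(img)  # img is assumed to be a PIL Image or Tensor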
    """

    @staticmethod
    def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]:
        """Get parameters for ``crop`` for a random crop.

        Args:
            img (PIL Image or Tensor): Image to be cropped.
            output_size (tuple): Expected output size of the crop.

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
        """
        w, h = F._get_image_size(img)
        th, tw = output_size
        if w == tw and h == th:
            return 0, 0, h, w

        # +1 so that a valid crop position exists even when the image size equals the target size
        i = torch.randint(0, h - th + 1, size=(1, )).item()
        j = torch.randint(0, w - tw + 1, size=(1, )).item()
        return i, j, th, tw

    def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"):
        super().__init__()
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        elif isinstance(size, Sequence) and len(size) == 1:
            self.size = (size[0], size[0])
        else:
            if len(size) != 2:
                raise ValueError("Please provide only two dimensions (h, w) for size.")

            # cast to tuple for torchscript
            self.size = tuple(size)
        self.padding = padding
        self.pad_if_needed = pad_if_needed
        self.fill = fill
        self.padding_mode = padding_mode

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            PIL Image or Tensor: Cropped image.
        """
        if self.padding is not None:
            img = F.pad(img, self.padding, self.fill, self.padding_mode)

        width, height = F._get_image_size(img)
        # pad the width if needed
        if self.pad_if_needed and width < self.size[1]:
            padding = [self.size[1] - width, 0]
            img = F.pad(img, padding, self.fill, self.padding_mode)
        # pad the height if needed
        if self.pad_if_needed and height < self.size[0]:
            padding = [0, self.size[0] - height]
            img = F.pad(img, padding, self.fill, self.padding_mode)

        i, j, h, w = self.get_params(img, self.size)

        return F.crop(img, i, j, h, w)

    def __repr__(self):
        return self.__class__.__name__ + "(size={0}, padding={1})".format(self.size, self.padding)


class RandomHorizontalFlip(torch.nn.Module):
    """Horizontally flip the given image randomly with a given probability.
    The image can be a PIL Image or a torch Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    Args:
        p (float): probability of the image being flipped. Default value is 0.5
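
    Example (a minimal sketch):
        >>> flipped = RandomHorizontalFlip(p=0.5)(img)  # img: PIL Image or Tensor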
    """

    def __init__(self, p=0.5):
        super().__init__()
        self.p = p

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be flipped.

        Returns:
            PIL Image or Tensor: Randomly flipped image.
        """
        if torch.rand(1) < self.p:
            return F.hflip(img)
        return img

    def __repr__(self):
        return self.__class__.__name__ + '(p={})'.format(self.p)


class RandomVerticalFlip(torch.nn.Module):
    """Vertically flip the given image randomly with a given probability.
    The image can be a PIL Image or a torch Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    Args:
        p (float): probability of the image being flipped. Default value is 0.5
    """

    def __init__(self, p=0.5):
        super().__init__()
        self.p = p

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be flipped.

        Returns:
            PIL Image or Tensor: Randomly flipped image.
        """
        if torch.rand(1) < self.p:
            return F.vflip(img)
        return img

    def __repr__(self):
        return self.__class__.__name__ + '(p={})'.format(self.p)


class RandomPerspective(object):
    """Performs a random perspective transformation of the given PIL Image with a given probability.

    Args:
        interpolation: Interpolation method. Default is ``Image.BICUBIC``.

        p (float): probability of the image being perspectively transformed. Default value is 0.5.

        distortion_scale (float): argument to control the degree of distortion, ranging from 0 to 1.
            Default value is 0.5.

        fill (3-tuple or int): RGB pixel fill value for the area outside the transformed image.
            If int, it is used for all channels. Default value is 0.
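
    Example (a minimal sketch):
        >>> out = RandomPerspective(distortion_scale=0.5, p=1.0)(img)  # img: PIL Image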
    """

    def __init__(self, distortion_scale=0.5, p=0.5, interpolation=Image.BICUBIC, fill=0):
        self.p = p
        self.interpolation = interpolation
        self.distortion_scale = distortion_scale
        self.fill = fill

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be perspectively transformed.

        Returns:
            PIL Image: Randomly perspectively transformed image.
        """
        if not F._is_pil_image(img):
            raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

        if random.random() < self.p:
            width, height = img.size
            startpoints, endpoints = self.get_params(width, height, self.distortion_scale)
            return F.perspective(img, startpoints, endpoints, self.interpolation, self.fill)
        return img

    @staticmethod
    def get_params(width, height, distortion_scale):
        """Get parameters for ``perspective`` for a random perspective transform.

        Args:
            width: width of the image.
            height: height of the image.
            distortion_scale: argument to control the degree of distortion, ranging from 0 to 1.

        Returns:
            List containing [top-left, top-right, bottom-right, bottom-left] of the original image,
            List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image.
        """
        half_height = int(height / 2)
        half_width = int(width / 2)
        topleft = (random.randint(0, int(distortion_scale * half_width)),
                   random.randint(0, int(distortion_scale * half_height)))
        topright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),
                    random.randint(0, int(distortion_scale * half_height)))
        botright = (random.randint(width - int(distortion_scale * half_width) - 1, width - 1),
                    random.randint(height - int(distortion_scale * half_height) - 1, height - 1))
        botleft = (random.randint(0, int(distortion_scale * half_width)),
                   random.randint(height - int(distortion_scale * half_height) - 1, height - 1))
        startpoints = [(0, 0), (width - 1, 0), (width - 1, height - 1), (0, height - 1)]
        endpoints = [topleft, topright, botright, botleft]
        return startpoints, endpoints

    def __repr__(self):
        return self.__class__.__name__ + '(p={})'.format(self.p)


class RandomResizedCrop(object):
    """Crop the given PIL Image to random size and aspect ratio.

    A crop of a random size (default: 0.08 to 1.0 of the original area) and a random
    aspect ratio (default: 3/4 to 4/3 of the original aspect ratio) is made. This crop
    is finally resized to given size.
    This is popularly used to train the Inception networks.

    Args:
        size: expected output size of each edge
        scale: range of the area fraction of the original image to crop
        ratio: range of aspect ratios of the crop
        interpolation: Default: PIL.Image.BILINEAR
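
    Example (a minimal sketch, as commonly used for ImageNet-style training):
        >>> rrc = RandomResizedCrop(224)
        >>> out = rrc(img)  # img is assumed to be a PIL Image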
    """

    def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=Image.BILINEAR):
        if isinstance(size, (tuple, list)):
            self.size = size
        else:
            self.size = (size, size)
        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
            warnings.warn("range should be of kind (min, max)")

        self.interpolation = interpolation
        self.scale = scale
        self.ratio = ratio

    @staticmethod
    def get_params(img, scale, ratio):
        """Get parameters for ``crop`` for a random sized crop.

        Args:
            img (PIL Image): Image to be cropped.
            scale (tuple): range of the area fraction of the original image to crop
            ratio (tuple): range of aspect ratios of the crop

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
                sized crop.
        """
        width, height = F._get_image_size(img)
        area = height * width
        for _ in range(10):
            target_area = random.uniform(*scale) * area
            log_ratio = (math.log(ratio[0]), math.log(ratio[1]))
            aspect_ratio = math.exp(random.uniform(*log_ratio))

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if 0 < w <= width and 0 < h <= height:
                i = random.randint(0, height - h)
                j = random.randint(0, width - w)
                return i, j, h, w

        # Fallback to central crop
        in_ratio = float(width) / float(height)
        if (in_ratio < min(ratio)):
            w = width
            h = int(round(w / min(ratio)))
        elif (in_ratio > max(ratio)):
            h = height
            w = int(round(h * max(ratio)))
        else:  # whole image
            w = width
            h = height
        i = (height - h) // 2
        j = (width - w) // 2
        return i, j, h, w

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be cropped and resized.

        Returns:
            PIL Image: Randomly cropped and resized image.
        """
        i, j, h, w = self.get_params(img, self.scale, self.ratio)
        return F.resized_crop(img, i, j, h, w, self.size, self.interpolation)

    def __repr__(self):
        interpolate_str = _pil_interpolation_to_str[self.interpolation]
        format_string = self.__class__.__name__ + '(size={0}'.format(self.size)
        format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale))
        format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio))
        format_string += ', interpolation={0})'.format(interpolate_str)
        return format_string

class RandomSizedCrop(RandomResizedCrop):
    """
    Note: This transform is deprecated in favor of RandomResizedCrop.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn("The use of the transforms.RandomSizedCrop transform is deprecated, " +
                      "please use transforms.RandomResizedCrop instead.")
        super(RandomSizedCrop, self).__init__(*args, **kwargs)


class FiveCrop(torch.nn.Module):
    """Crop the given image into four corners and the central crop.
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    .. Note::
         This transform returns a tuple of images and there may be a mismatch in the number of
         inputs and targets your Dataset returns. See below for an example of how to deal with
         this.

    Args:
         size (sequence or int): Desired output size of the crop. If size is an ``int``
            instead of sequence like (h, w), a square crop of size (size, size) is made.
            If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).

    Example:
         >>> transform = Compose([
         >>>    FiveCrop(size), # this is a list of PIL Images
         >>>    Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
         >>> ])
         >>> #In your test loop you can do the following:
         >>> input, target = batch # input is a 5d tensor, target is 2d
         >>> bs, ncrops, c, h, w = input.size()
         >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
         >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
    """

    def __init__(self, size):
        super().__init__()
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        elif isinstance(size, Sequence) and len(size) == 1:
            self.size = (size[0], size[0])
        else:
            if len(size) != 2:
                raise ValueError("Please provide only two dimensions (h, w) for size.")

            self.size = size

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            tuple of 5 images. Image can be PIL Image or Tensor
        """
        return F.five_crop(img, self.size)

    def __repr__(self):
        return self.__class__.__name__ + '(size={0})'.format(self.size)

class TenCrop(torch.nn.Module):
    """Crop the given image into four corners and the central crop plus the flipped version of
    these (horizontal flipping is used by default).
    The image can be a PIL Image or a Tensor, in which case it is expected
    to have [..., H, W] shape, where ... means an arbitrary number of leading
    dimensions

    .. Note::
         This transform returns a tuple of images and there may be a mismatch in the number of
         inputs and targets your Dataset returns. See below for an example of how to deal with
         this.

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made. If provided a tuple or list of length 1, it will be interpreted as (size[0], size[0]).
        vertical_flip (bool): Use vertical flipping instead of horizontal

    Example:
         >>> transform = Compose([
         >>>    TenCrop(size), # this is a list of PIL Images
         >>>    Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor
         >>> ])
         >>> #In your test loop you can do the following:
         >>> input, target = batch # input is a 5d tensor, target is 2d
         >>> bs, ncrops, c, h, w = input.size()
         >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops
         >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops
    """

    def __init__(self, size, vertical_flip=False):
        super().__init__()
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        elif isinstance(size, Sequence) and len(size) == 1:
            self.size = (size[0], size[0])
        else:
            if len(size) != 2:
                raise ValueError("Please provide only two dimensions (h, w) for size.")

            self.size = size
        self.vertical_flip = vertical_flip

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Image to be cropped.

        Returns:
            tuple of 10 images. Image can be PIL Image or Tensor
        """
        return F.ten_crop(img, self.size, self.vertical_flip)

    def __repr__(self):
        return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip)


class LinearTransformation(object):
    """Transform a tensor image with a square transformation matrix and a mean_vector computed
    offline.
    Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and
    subtract mean_vector from it which is then followed by computing the dot
    product with the transformation matrix and then reshaping the tensor to its
    original shape.
    Applications:
        whitening transformation: Suppose X [N x D] is a matrix of zero-centered, row-stacked data.
        Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X),
        perform SVD on this matrix and pass it as transformation_matrix.

    Args:
        transformation_matrix (Tensor): tensor [D x D], D = C x H x W
        mean_vector (Tensor): tensor [D], D = C x H x W
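
    Example (a sketch of a whitening setup; X below is an assumed [N x D] zero-centered data matrix):
        >>> D = 3 * 8 * 8
        >>> X = torch.randn(1000, D)                      # stand-in for flattened training data
        >>> cov = torch.mm(X.t(), X) / X.size(0)          # [D x D] covariance estimate
        >>> U, S, _ = torch.svd(cov)
        >>> W = torch.mm(U, torch.mm(torch.diag(1.0 / torch.sqrt(S + 1e-5)), U.t()))
        >>> whiten = LinearTransformation(W, torch.zeros(D))
        >>> out = whiten(torch.randn(3, 8, 8))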
    """

    def __init__(self, transformation_matrix, mean_vector):
        if transformation_matrix.size(0) != transformation_matrix.size(1):
            raise ValueError("transformation_matrix should be square. Got " +
                             "[{} x {}] rectangular matrix.".format(*transformation_matrix.size()))

        if mean_vector.size(0) != transformation_matrix.size(0):
            raise ValueError("mean_vector should have the same length {}".format(mean_vector.size(0)) +
                             " as any one of the dimensions of the transformation_matrix [{}]"
                             .format(tuple(transformation_matrix.size())))
        self.transformation_matrix = transformation_matrix
        self.mean_vector = mean_vector

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be whitened.

        Returns:
            Tensor: Transformed image.
        """
        if tensor.size(0) * tensor.size(1) * tensor.size(2) != self.transformation_matrix.size(0):
            raise ValueError("tensor and transformation matrix have incompatible shape." +
                             "[{} x {} x {}] != ".format(*tensor.size()) +
                             "{}".format(self.transformation_matrix.size(0)))
        flat_tensor = tensor.view(1, -1) - self.mean_vector
        transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
        tensor = transformed_tensor.view(tensor.size())
        return tensor

    def __repr__(self):
        format_string = self.__class__.__name__ + '(transformation_matrix='
        format_string += (str(self.transformation_matrix.tolist()) + ')')
        format_string += (", (mean_vector=" + str(self.mean_vector.tolist()) + ')')
        return format_string

class ColorJitter(torch.nn.Module):
    """Randomly change the brightness, contrast and saturation of an image.

    Args:
        brightness (float or tuple of float (min, max)): How much to jitter brightness.
            brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness]
            or the given [min, max]. Should be non negative numbers.
        contrast (float or tuple of float (min, max)): How much to jitter contrast.
            contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast]
            or the given [min, max]. Should be non negative numbers.
        saturation (float or tuple of float (min, max)): How much to jitter saturation.
            saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation]
            or the given [min, max]. Should be non negative numbers.
        hue (float or tuple of float (min, max)): How much to jitter hue.
            hue_factor is chosen uniformly from [-hue, hue] or the given [min, max].
            Should have 0 <= hue <= 0.5 or -0.5 <= min <= max <= 0.5.
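
    Example (a sketch; the factor values are arbitrary assumptions):
        >>> jitter = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
        >>> out = jitter(img)  # applies the four adjustments in random order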
    """
    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        super().__init__()
        self.brightness = self._check_input(brightness, 'brightness')
        self.contrast = self._check_input(contrast, 'contrast')
        self.saturation = self._check_input(saturation, 'saturation')
        self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5),
                                     clip_first_on_zero=False)

    @torch.jit.unused
    def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True):
        if isinstance(value, numbers.Number):
            if value < 0:
                raise ValueError("If {} is a single number, it must be non negative.".format(name))
            value = [center - float(value), center + float(value)]
            if clip_first_on_zero:
                value[0] = max(value[0], 0.0)
        elif isinstance(value, (tuple, list)) and len(value) == 2:
            if not bound[0] <= value[0] <= value[1] <= bound[1]:
                raise ValueError("{} values should be between {}".format(name, bound))
        else:
            raise TypeError("{} should be a single number or a list/tuple with lenght 2.".format(name))

        # if value is 0 or (1., 1.) for brightness/contrast/saturation
        # or (0., 0.) for hue, do nothing
        if value[0] == value[1] == center:
            value = None
        return value

    @staticmethod
    @torch.jit.unused
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.

        Arguments are same as that of __init__.

        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        transforms = []

        if brightness is not None:
            brightness_factor = random.uniform(brightness[0], brightness[1])
            transforms.append(Lambda(lambda img: F.adjust_brightness(img, brightness_factor)))

        if contrast is not None:
            contrast_factor = random.uniform(contrast[0], contrast[1])
            transforms.append(Lambda(lambda img: F.adjust_contrast(img, contrast_factor)))

        if saturation is not None:
            saturation_factor = random.uniform(saturation[0], saturation[1])
            transforms.append(Lambda(lambda img: F.adjust_saturation(img, saturation_factor)))

        if hue is not None:
            hue_factor = random.uniform(hue[0], hue[1])
            transforms.append(Lambda(lambda img: F.adjust_hue(img, hue_factor)))

        random.shuffle(transforms)
        transform = Compose(transforms)

        return transform

    def forward(self, img):
        """
        Args:
            img (PIL Image or Tensor): Input image.

        Returns:
            PIL Image or Tensor: Color jittered image.
        """
        fn_idx = torch.randperm(4)
        for fn_id in fn_idx:
            if fn_id == 0 and self.brightness is not None:
                brightness = self.brightness
                brightness_factor = torch.tensor(1.0).uniform_(brightness[0], brightness[1]).item()
                img = F.adjust_brightness(img, brightness_factor)

            if fn_id == 1 and self.contrast is not None:
                contrast = self.contrast
                contrast_factor = torch.tensor(1.0).uniform_(contrast[0], contrast[1]).item()
                img = F.adjust_contrast(img, contrast_factor)

            if fn_id == 2 and self.saturation is not None:
                saturation = self.saturation
                saturation_factor = torch.tensor(1.0).uniform_(saturation[0], saturation[1]).item()
                img = F.adjust_saturation(img, saturation_factor)

            if fn_id == 3 and self.hue is not None:
                hue = self.hue
                hue_factor = torch.tensor(1.0).uniform_(hue[0], hue[1]).item()
                img = F.adjust_hue(img, hue_factor)

        return img
    def __repr__(self):
        format_string = self.__class__.__name__ + '('
        format_string += 'brightness={0}'.format(self.brightness)
        format_string += ', contrast={0}'.format(self.contrast)
        format_string += ', saturation={0}'.format(self.saturation)
        format_string += ', hue={0})'.format(self.hue)
        return format_string

class RandomRotation(object):
    """Rotate the image by angle.

    Args:
        degrees (sequence or float or int): Range of degrees to select from.
            If degrees is a number instead of sequence like (min, max), the range of degrees
            will be (-degrees, +degrees).
        resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
            An optional resampling filter. See `filters`_ for more information.
            If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
        expand (bool, optional): Optional expansion flag.
            If true, expands the output to make it large enough to hold the entire rotated image.
            If false or omitted, make the output image the same size as the input image.
            Note that the expand flag assumes rotation around the center and no translation.
        center (2-tuple, optional): Optional center of rotation.
            Origin is the upper left corner.
            Default is the center of the image.
        fill (n-tuple or int or float): Pixel fill value for area outside the rotated
            image. If int or float, the value is used for all bands.
            Defaults to 0 for all bands. This option is only available for ``pillow>=5.2.0``.

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

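    Example (a minimal sketch):
        >>> rotated = RandomRotation(degrees=(-10, 10), expand=True)(img)  # img: PIL Image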
    """

    def __init__(self, degrees, resample=False, expand=False, center=None, fill=None):
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError("If degrees is a single number, it must be positive.")
            self.degrees = (-degrees, degrees)
        else:
            if len(degrees) != 2:
                raise ValueError("If degrees is a sequence, it must be of len 2.")
            self.degrees = degrees

        self.resample = resample
        self.expand = expand
        self.center = center
        self.fill = fill

    @staticmethod
    def get_params(degrees):
        """Get parameters for ``rotate`` for a random rotation.

        Returns:
            sequence: params to be passed to ``rotate`` for random rotation.
        """
        angle = random.uniform(degrees[0], degrees[1])

        return angle

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be rotated.

        Returns:
            PIL Image: Rotated image.
        """

        angle = self.get_params(self.degrees)

        return F.rotate(img, angle, self.resample, self.expand, self.center, self.fill)

    def __repr__(self):
        format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees)
        format_string += ', resample={0}'.format(self.resample)
        format_string += ', expand={0}'.format(self.expand)
        if self.center is not None:
            format_string += ', center={0}'.format(self.center)
        if self.fill is not None:
            format_string += ', fill={0}'.format(self.fill)
        format_string += ')'
        return format_string


class RandomAffine(object):
    """Random affine transformation of the image keeping center invariant

    Args:
        degrees (sequence or float or int): Range of degrees to select from.
            If degrees is a number instead of sequence like (min, max), the range of degrees
            will be (-degrees, +degrees). Set to 0 to deactivate rotations.
        translate (tuple, optional): tuple of maximum absolute fraction for horizontal
            and vertical translations. For example translate=(a, b), then horizontal shift
            is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is
            randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default.
        scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is
            randomly sampled from the range a <= scale <= b. Will keep original scale by default.
        shear (sequence or float or int, optional): Range of degrees to select from.
            If shear is a number, a shear parallel to the x axis in the range (-shear, +shear)
            will be applied. Else if shear is a tuple or list of 2 values a shear parallel to the x axis in the
            range (shear[0], shear[1]) will be applied. Else if shear is a tuple or list of 4 values,
            an x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied.
            Will not apply shear by default.
        resample ({PIL.Image.NEAREST, PIL.Image.BILINEAR, PIL.Image.BICUBIC}, optional):
            An optional resampling filter. See `filters`_ for more information.
            If omitted, or if the image has mode "1" or "P", it is set to PIL.Image.NEAREST.
        fillcolor (tuple or int): Optional fill color (tuple for RGB images and int for grayscale) for the area
            outside the transform in the output image. (Pillow>=5.0.0)

    .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters

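    Example (a sketch; the parameter values are arbitrary assumptions):
        >>> aff = RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=10)
        >>> out = aff(img)  # img is assumed to be a PIL Image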
    """

    def __init__(self, degrees, translate=None, scale=None, shear=None, resample=False, fillcolor=0):
        if isinstance(degrees, numbers.Number):
            if degrees < 0:
                raise ValueError("If degrees is a single number, it must be positive.")
            self.degrees = (-degrees, degrees)
        else:
            assert isinstance(degrees, (tuple, list)) and len(degrees) == 2, \
                "degrees should be a list or tuple and it must be of length 2."
            self.degrees = degrees

        if translate is not None:
            assert isinstance(translate, (tuple, list)) and len(translate) == 2, \
                "translate should be a list or tuple and it must be of length 2."
            for t in translate:
                if not (0.0 <= t <= 1.0):
                    raise ValueError("translation values should be between 0 and 1")
        self.translate = translate

        if scale is not None:
            assert isinstance(scale, (tuple, list)) and len(scale) == 2, \
                "scale should be a list or tuple and it must be of length 2."
            for s in scale:
                if s <= 0:
                    raise ValueError("scale values should be positive")
        self.scale = scale

        if shear is not None:
            if isinstance(shear, numbers.Number):
                if shear < 0:
                    raise ValueError("If shear is a single number, it must be positive.")
                self.shear = (-shear, shear)
            else:
                assert isinstance(shear, (tuple, list)) and \
                    (len(shear) == 2 or len(shear) == 4), \
                    "shear should be a list or tuple and it must be of length 2 or 4."
                # X-Axis shear with [min, max]
                if len(shear) == 2:
                    self.shear = [shear[0], shear[1], 0., 0.]
                elif len(shear) == 4:
                    self.shear = [s for s in shear]
        else:
            self.shear = shear

        self.resample = resample
        self.fillcolor = fillcolor

    @staticmethod
    def get_params(degrees, translate, scale_ranges, shears, img_size):
        """Get parameters for an affine transformation.

        Returns:
            sequence: params (angle, translations, scale, shear) to be passed to
            the affine transformation.
        """
        angle = random.uniform(degrees[0], degrees[1])
        if translate is not None:
            max_dx = translate[0] * img_size[0]
            max_dy = translate[1] * img_size[1]
            translations = (np.round(random.uniform(-max_dx, max_dx)),
                            np.round(random.uniform(-max_dy, max_dy)))
        else:
            translations = (0, 0)

        if scale_ranges is not None:
            scale = random.uniform(scale_ranges[0], scale_ranges[1])
        else:
            scale = 1.0

        if shears is not None:
            if len(shears) == 2:
                shear = [random.uniform(shears[0], shears[1]), 0.]
            elif len(shears) == 4:
                shear = [random.uniform(shears[0], shears[1]),
                         random.uniform(shears[2], shears[3])]
        else:
            shear = 0.0

        return angle, translations, scale, shear

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be transformed.

        Returns:
            PIL Image: Affine transformed image.
        """
        ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img.size)
        return F.affine(img, *ret, resample=self.resample, fillcolor=self.fillcolor)

    def __repr__(self):
        s = '{name}(degrees={degrees}'
        if self.translate is not None:
            s += ', translate={translate}'
        if self.scale is not None:
            s += ', scale={scale}'
        if self.shear is not None:
            s += ', shear={shear}'
        if self.resample > 0:
            s += ', resample={resample}'
        if self.fillcolor != 0:
            s += ', fillcolor={fillcolor}'
        s += ')'
        d = dict(self.__dict__)
        d['resample'] = _pil_interpolation_to_str[d['resample']]
        return s.format(name=self.__class__.__name__, **d)


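# A minimal sketch (illustrative; not part of this module's API): sampling the
# affine parameters once via RandomAffine.get_params and passing the result to
# F.affine applies the same random transform to paired inputs, e.g. an image
# and its segmentation mask (here `img` and `mask` are assumed to be PIL Images
# of the same size):
#
#     params = RandomAffine.get_params((-30, 30), (0.1, 0.1), (0.9, 1.1), None, img.size)
#     img = F.affine(img, *params)
#     mask = F.affine(mask, *params)
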
class Grayscale(object):
    """Convert image to grayscale.

    Args:
        num_output_channels (int): (1 or 3) number of channels desired for output image

    Returns:
        PIL Image: Grayscale version of the input.
         - If ``num_output_channels == 1`` : returned image is single channel
         - If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b
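
    Example (a usage sketch; ``img`` is assumed to be a PIL Image and this
    module to be imported as ``transforms``):
        >>> img = transforms.Grayscale(num_output_channels=3)(img)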

    """

    def __init__(self, num_output_channels=1):
        self.num_output_channels = num_output_channels

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be converted to grayscale.

        Returns:
            PIL Image: Grayscale version of the input image.
        """
        return F.to_grayscale(img, num_output_channels=self.num_output_channels)

    def __repr__(self):
        return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels)


class RandomGrayscale(object):
    """Randomly convert image to grayscale with a probability of p (default 0.1).

    Args:
        p (float): probability that image should be converted to grayscale.

    Returns:
        PIL Image: Grayscale version of the input image with probability p and unchanged
        with probability (1-p).
        - If input image is 1 channel: grayscale version is 1 channel
        - If input image is 3 channel: grayscale version is 3 channel with r == g == b
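
    Example (a usage sketch; ``img`` is assumed to be a PIL Image and this
    module to be imported as ``transforms``):
        >>> img = transforms.RandomGrayscale(p=0.2)(img)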

    """

    def __init__(self, p=0.1):
        self.p = p

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be converted to grayscale.

        Returns:
            PIL Image: Randomly grayscaled image.
        """
        num_output_channels = 1 if img.mode == 'L' else 3
        if random.random() < self.p:
            return F.to_grayscale(img, num_output_channels=num_output_channels)
        return img

    def __repr__(self):
        return self.__class__.__name__ + '(p={0})'.format(self.p)


class RandomErasing(torch.nn.Module):
    """Randomly selects a rectangle region in an image and erases its pixels.
    'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/pdf/1708.04896.pdf

    Args:
         p: probability that the random erasing operation will be performed.
         scale: range of proportion of erased area against input image.
         ratio: range of aspect ratio of erased area.
         value: erasing value. Default is 0. If a single int, it is used to
            erase all pixels. If a tuple of length 3, it is used to erase
            R, G, B channels respectively.
            If the str 'random', each pixel is erased with random values.
         inplace: boolean to make this transform inplace. Default set to False.

    Returns:
        Erased Image.

    Example:
        >>> transform = transforms.Compose([
        >>>   transforms.RandomHorizontalFlip(),
        >>>   transforms.ToTensor(),
        >>>   transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
        >>>   transforms.RandomErasing(),
        >>> ])
    """

    def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False):
        super().__init__()
        if not isinstance(value, (numbers.Number, str, tuple, list)):
            raise TypeError("Argument value should be either a number or str or a sequence")
        if isinstance(value, str) and value != "random":
            raise ValueError("If value is str, it should be 'random'")
        if not isinstance(scale, (tuple, list)):
            raise TypeError("Scale should be a sequence")
        if not isinstance(ratio, (tuple, list)):
            raise TypeError("Ratio should be a sequence")
        if (scale[0] > scale[1]) or (ratio[0] > ratio[1]):
            warnings.warn("Scale and ratio should be of kind (min, max)")
        if scale[0] < 0 or scale[1] > 1:
            raise ValueError("Scale should be between 0 and 1")
        if p < 0 or p > 1:
            raise ValueError("Random erasing probability should be between 0 and 1")

        self.p = p
        self.scale = scale
        self.ratio = ratio
        self.value = value
        self.inplace = inplace

    @staticmethod
    def get_params(
            img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Optional[List[float]] = None
    ) -> Tuple[int, int, int, int, Tensor]:
        """Get parameters for ``erase`` for a random erasing.

        Args:
            img (Tensor): Tensor image of size (C, H, W) to be erased.
            scale (tuple or list): range of proportion of erased area against input image.
            ratio (tuple or list): range of aspect ratio of erased area.
            value (list, optional): erasing value. If None, it is interpreted as "random"
                (erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number,
                i.e. ``value[0]``.

        Returns:
            tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing.
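
        Example (a usage sketch; ``img`` is assumed to be a (C, H, W) tensor
        and ``torchvision.transforms.functional`` to be imported as ``F``):
            >>> i, j, h, w, v = RandomErasing.get_params(img, scale=(0.02, 0.33), ratio=(0.3, 3.3))
            >>> img = F.erase(img, i, j, h, w, v)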
        """
        img_c, img_h, img_w = img.shape
        area = img_h * img_w

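        # Rejection sampling: draw an erased-area fraction from `scale` and an
        # aspect ratio from `ratio`, derive the box size (h, w), and retry (up
        # to 10 attempts) until the box fits strictly inside the image.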
        for _ in range(10):
            erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item()
            aspect_ratio = torch.empty(1).uniform_(ratio[0], ratio[1]).item()

            h = int(round(math.sqrt(erase_area * aspect_ratio)))
            w = int(round(math.sqrt(erase_area / aspect_ratio)))
            if not (h < img_h and w < img_w):
                continue

            if value is None:
                v = torch.empty([img_c, h, w], dtype=torch.float32).normal_()
            else:
                v = torch.tensor(value)[:, None, None]

            i = torch.randint(0, img_h - h, size=(1, )).item()
            j = torch.randint(0, img_w - w, size=(1, )).item()
            return i, j, h, w, v

        # Return original image
        return 0, 0, img_h, img_w, img

    def forward(self, img):
        """
        Args:
            img (Tensor): Tensor image of size (C, H, W) to be erased.

        Returns:
            img (Tensor): Erased Tensor image.
        """
        if torch.rand(1) < self.p:

            # cast self.value to script acceptable type
            if isinstance(self.value, (int, float)):
                value = [self.value, ]
            elif isinstance(self.value, str):
                value = None
            elif isinstance(self.value, tuple):
                value = list(self.value)
            else:
                value = self.value

            if value is not None and not (len(value) in (1, img.shape[-3])):
                raise ValueError(
                    "If value is a sequence, it should have either a single value or "
                    "{} (number of input channels)".format(img.shape[-3])
                )

            x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value)
            return F.erase(img, x, y, h, w, v, self.inplace)
        return img