from __future__ import division
import torch
import math
import random
from PIL import Image, ImageOps, ImageEnhance
try:
    import accimage
except ImportError:
    accimage = None
import numpy as np
import numbers
import types
import collections
import warnings


def _is_pil_image(img):
    if accimage is not None:
        return isinstance(img, (Image.Image, accimage.Image))
    else:
        return isinstance(img, Image.Image)


def _is_tensor_image(img):
    return torch.is_tensor(img) and img.ndimension() == 3


def _is_numpy_image(img):
    return isinstance(img, np.ndarray) and (img.ndim in {2, 3})


def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.

    See ``ToTensor`` for more details.

    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

    Returns:
        Tensor: Converted image.
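
    Example (an illustrative sketch; ``img.jpg`` is a hypothetical RGB file):
        >>> img = Image.open('img.jpg')
        >>> t = to_tensor(img)  # torch.FloatTensor of shape (C, H, W) in [0.0, 1.0]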
    """
    if not(_is_pil_image(pic) or _is_numpy_image(pic)):
        raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic)))

    if isinstance(pic, np.ndarray):
        # handle numpy array
        img = torch.from_numpy(pic.transpose((2, 0, 1)))
        # backward compatibility
        return img.float().div(255)

    if accimage is not None and isinstance(pic, accimage.Image):
        nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32)
        pic.copyto(nppic)
        return torch.from_numpy(nppic)

    # handle PIL Image
    if pic.mode == 'I':
        img = torch.from_numpy(np.array(pic, np.int32, copy=False))
    elif pic.mode == 'I;16':
        img = torch.from_numpy(np.array(pic, np.int16, copy=False))
    else:
        img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
    # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
    if pic.mode == 'YCbCr':
        nchannel = 3
    elif pic.mode == 'I;16':
        nchannel = 1
    else:
        nchannel = len(pic.mode)
    img = img.view(pic.size[1], pic.size[0], nchannel)
    # put it from HWC to CHW format
    # yikes, this transpose takes 80% of the loading time/CPU
    img = img.transpose(0, 1).transpose(0, 2).contiguous()
    if isinstance(img, torch.ByteTensor):
        return img.float().div(255)
    else:
        return img


def to_pil_image(pic, mode=None):
    """Convert a tensor or an ndarray to PIL Image.

    See :class:`~torchvision.transforms.ToPILImage` for more details.

    Args:
        pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).

    .. _PIL.Image mode: http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#modes

    Returns:
        PIL Image: Image converted to PIL Image.
    """
    if not(_is_numpy_image(pic) or _is_tensor_image(pic)):
        raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic)))

    npimg = pic
    if isinstance(pic, torch.FloatTensor):
        pic = pic.mul(255).byte()
    if torch.is_tensor(pic):
        npimg = np.transpose(pic.numpy(), (1, 2, 0))

    if not isinstance(npimg, np.ndarray):
        raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' +
                        'not {}'.format(type(npimg)))

    if npimg.shape[2] == 1:
        expected_mode = None
        npimg = npimg[:, :, 0]
        if npimg.dtype == np.uint8:
            expected_mode = 'L'
        if npimg.dtype == np.int16:
            expected_mode = 'I;16'
        if npimg.dtype == np.int32:
            expected_mode = 'I'
        elif npimg.dtype == np.float32:
            expected_mode = 'F'
        if mode is not None and mode != expected_mode:
            raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}"
                             .format(mode, npimg.dtype, expected_mode))
        mode = expected_mode

    elif npimg.shape[2] == 4:
        permitted_4_channel_modes = ['RGBA', 'CMYK']
        if mode is not None and mode not in permitted_4_channel_modes:
            raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes))

        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGBA'
    else:
        permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV']
        if mode is not None and mode not in permitted_3_channel_modes:
            raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes))
        if mode is None and npimg.dtype == np.uint8:
            mode = 'RGB'

    if mode is None:
        raise TypeError('Input type {} is not supported'.format(npimg.dtype))

    return Image.fromarray(npimg, mode=mode)


def normalize(tensor, mean, std):
    """Normalize a tensor image with mean and standard deviation.

    See ``Normalize`` for more details.

    Args:
        tensor (Tensor): Tensor image of size (C, H, W) to be normalized.
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.

    Returns:
        Tensor: Normalized Tensor image.
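
    Example (an illustrative sketch; note the operation is in-place):
        >>> t = torch.rand(3, 4, 4)  # values in [0, 1]
        >>> out = normalize(t, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])  # values in [-1, 1]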
    """
    if not _is_tensor_image(tensor):
        raise TypeError('tensor is not a torch image.')
    # TODO: make efficient
    for t, m, s in zip(tensor, mean, std):
        t.sub_(m).div_(s)
    return tensor


def resize(img, size, interpolation=Image.BILINEAR):
    """Resize the input PIL Image to the given size.

    Args:
        img (PIL Image): Image to be resized.
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), the output size will be matched to this. If size is an int,
            the smaller edge of the image will be matched to this number, maintaining
            the aspect ratio. i.e., if height > width, then the image will be rescaled to
            (size * height / width, size)
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``

    Returns:
        PIL Image: Resized image.
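
    Example (an illustrative sketch; assumes a 400x300 (w x h) PIL image ``img``):
        >>> resize(img, 150).size         # smaller edge matched: (200, 150)
        >>> resize(img, (100, 200)).size  # exact (h, w); PIL reports (w, h): (200, 100)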
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))
    if not (isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)):
        raise TypeError('Got inappropriate size arg: {}'.format(size))

    if isinstance(size, int):
        w, h = img.size
        if (w <= h and w == size) or (h <= w and h == size):
            return img
        if w < h:
            ow = size
            oh = int(size * h / w)
            return img.resize((ow, oh), interpolation)
        else:
            oh = size
            ow = int(size * w / h)
            return img.resize((ow, oh), interpolation)
    else:
        return img.resize(size[::-1], interpolation)


def scale(*args, **kwargs):
    warnings.warn("The use of the transforms.Scale transform is deprecated, " +
                  "please use transforms.Resize instead.")
    return resize(*args, **kwargs)


def pad(img, padding, fill=0):
    """Pad the given PIL Image on all sides with the given "pad" value.

    Args:
        img (PIL Image): Image to be padded.
        padding (int or tuple): Padding on each border. If a single int is provided this
            is used to pad all borders. If a tuple of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a tuple of length 4 is provided
            this is the padding for the left, top, right and bottom borders
            respectively.
        fill: Pixel fill value. Default is 0. If a tuple of
            length 3, it is used to fill R, G, B channels respectively.

    Returns:
        PIL Image: Padded image.
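
    Example (an illustrative sketch; assumes a 4x4 PIL image ``img``):
        >>> pad(img, 2).size             # all borders padded by 2: (8, 8)
        >>> pad(img, (1, 2)).size        # left/right=1, top/bottom=2: (6, 8)
        >>> pad(img, (1, 2, 3, 4)).size  # left, top, right, bottom: (8, 10)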
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if not isinstance(padding, (numbers.Number, tuple)):
        raise TypeError('Got inappropriate padding arg')
    if not isinstance(fill, (numbers.Number, str, tuple)):
        raise TypeError('Got inappropriate fill arg')

    if isinstance(padding, collections.Sequence) and len(padding) not in [2, 4]:
        raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
                         "{} element tuple".format(len(padding)))

    return ImageOps.expand(img, border=padding, fill=fill)


def crop(img, i, j, h, w):
    """Crop the given PIL Image.

    Args:
        img (PIL Image): Image to be cropped.
        i: Upper pixel coordinate.
        j: Left pixel coordinate.
        h: Height of the cropped image.
        w: Width of the cropped image.

    Returns:
        PIL Image: Cropped image.
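
    Example (an illustrative sketch; assumes a PIL image ``img`` of at least 80x60):
        >>> patch = crop(img, 10, 20, 50, 60)  # top-left at row 10, col 20
        >>> patch.size                         # PIL reports (w, h): (60, 50)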
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.crop((j, i, j + w, i + h))


def resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):
    """Crop the given PIL Image and resize it to desired size.

    Notably used in RandomResizedCrop.

    Args:
        img (PIL Image): Image to be cropped.
        i: Upper pixel coordinate.
        j: Left pixel coordinate.
        h: Height of the cropped image.
        w: Width of the cropped image.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``.

    Returns:
        PIL Image: Cropped image.
    """
    assert _is_pil_image(img), 'img should be PIL Image'
    img = crop(img, i, j, h, w)
    img = resize(img, size, interpolation)
    return img


def hflip(img):
    """Horizontally flip the given PIL Image.

    Args:
        img (PIL Image): Image to be flipped.

    Returns:
        PIL Image: Horizontally flipped image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.transpose(Image.FLIP_LEFT_RIGHT)


def vflip(img):
    """Vertically flip the given PIL Image.

    Args:
        img (PIL Image): Image to be flipped.

    Returns:
        PIL Image: Vertically flipped image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    return img.transpose(Image.FLIP_TOP_BOTTOM)


def five_crop(img, size):
    """Crop the given PIL Image into four corners and the central crop.

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.

    Returns:
        tuple: tuple (tl, tr, bl, br, center) corresponding to the top left,
            top right, bottom left, bottom right and center crop.
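
    Example (an illustrative sketch; assumes a PIL image ``img`` of at least 100x100):
        >>> crops = five_crop(img, 100)
        >>> [c.size for c in crops]  # five (100, 100) crops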
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    else:
        assert len(size) == 2, "Please provide only two dimensions (h, w) for size."

    w, h = img.size
    crop_h, crop_w = size
    if crop_w > w or crop_h > h:
        raise ValueError("Requested crop size {} is bigger than input size {}".format(size,
                                                                                      (h, w)))
    tl = img.crop((0, 0, crop_w, crop_h))
    tr = img.crop((w - crop_w, 0, w, crop_h))
    bl = img.crop((0, h - crop_h, crop_w, h))
    br = img.crop((w - crop_w, h - crop_h, w, h))
    center = CenterCrop((crop_h, crop_w))(img)
    return (tl, tr, bl, br, center)


def ten_crop(img, size, vertical_flip=False):
    """Crop the given PIL Image into four corners and the central crop plus the
       flipped version of these (horizontal flipping is used by default).

    .. Note::
        This transform returns a tuple of images and there may be a
        mismatch in the number of inputs and targets your ``Dataset`` returns.

    Args:
        img (PIL Image): Image to be cropped.
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
        vertical_flip (bool): Use vertical flipping instead of horizontal.

    Returns:
        tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip,
            br_flip, center_flip) corresponding to the top left, top right,
            bottom left, bottom right and center crop and the same for the
            flipped image.
    """
    if isinstance(size, numbers.Number):
        size = (int(size), int(size))
    else:
        assert len(size) == 2, "Please provide only two dimensions (h, w) for size."

    first_five = five_crop(img, size)

    if vertical_flip:
        img = vflip(img)
    else:
        img = hflip(img)

    second_five = five_crop(img, size)
    return first_five + second_five


def adjust_brightness(img, brightness_factor):
    """Adjust brightness of an Image.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        brightness_factor (float): How much to adjust the brightness. Can be
            any non-negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image: Brightness adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Brightness(img)
    img = enhancer.enhance(brightness_factor)
    return img


def adjust_contrast(img, contrast_factor):
    """Adjust contrast of an Image.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non-negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image: Contrast adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Contrast(img)
    img = enhancer.enhance(contrast_factor)
    return img


def adjust_saturation(img, saturation_factor):
    """Adjust color saturation of an image.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        saturation_factor (float): How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image: Saturation adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Color(img)
    img = enhancer.enhance(saturation_factor)
    return img


def adjust_hue(img, hue_factor):
    """Adjust hue of an image.

    The image hue is adjusted by converting the image to HSV and
    cyclically shifting the intensities in the hue channel (H).
    The image is then converted back to original image mode.

    `hue_factor` is the amount of shift in H channel and must be in the
    interval `[-0.5, 0.5]`.

    See https://en.wikipedia.org/wiki/Hue for more details on Hue.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        hue_factor (float): How much to shift the hue channel. Should be in
            [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in
            HSV space in positive and negative direction respectively.
            0 means no shift. Therefore, both -0.5 and 0.5 will give an image
            with complementary colors while 0 gives the original image.

    Returns:
        PIL Image: Hue adjusted image.
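
    Example (an illustrative sketch; assumes an RGB PIL image ``img``):
        >>> shifted = adjust_hue(img, 0.2)   # rotates the H channel by 0.2 * 255 ~ 51
        >>> inverted = adjust_hue(img, 0.5)  # complementary colors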
    """
    if not(-0.5 <= hue_factor <= 0.5):
        raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor))

    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    input_mode = img.mode
    if input_mode in {'L', '1', 'I', 'F'}:
        return img

    h, s, v = img.convert('HSV').split()

    np_h = np.array(h, dtype=np.uint8)
    # uint8 addition takes care of rotation across boundaries
    with np.errstate(over='ignore'):
        np_h += np.uint8(hue_factor * 255)
    h = Image.fromarray(np_h, 'L')

    img = Image.merge('HSV', (h, s, v)).convert(input_mode)
    return img


def adjust_gamma(img, gamma, gain=1):
    """Perform gamma correction on an image.

    Also known as Power Law Transform. Intensities in RGB mode are adjusted
    based on the following equation:

        I_out = 255 * gain * ((I_in / 255) ** gamma)

    See https://en.wikipedia.org/wiki/Gamma_correction for more details.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        gamma (float): Non-negative real number. gamma larger than 1 makes the
            shadows darker, while gamma smaller than 1 makes dark regions
            lighter.
        gain (float): The constant multiplier.
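
    Example (an illustrative sketch; assumes an RGB PIL image ``img``):
        >>> darker = adjust_gamma(img, 2.0)   # mid-tone 128 -> 255 * (128 / 255) ** 2.0 ~ 64
        >>> lighter = adjust_gamma(img, 0.5)  # mid-tone 128 -> 255 * (128 / 255) ** 0.5 ~ 181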
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    if gamma < 0:
        raise ValueError('Gamma should be a non-negative real number')

    input_mode = img.mode
    img = img.convert('RGB')

    np_img = np.array(img, dtype=np.float32)
    np_img = 255 * gain * ((np_img / 255) ** gamma)
    np_img = np.uint8(np.clip(np_img, 0, 255))

    img = Image.fromarray(np_img, 'RGB').convert(input_mode)
    return img


class Compose(object):
    """Composes several transforms together.

    Args:
        transforms (list of ``Transform`` objects): list of transforms to compose.

    Example:
        >>> transforms.Compose([
        >>>     transforms.CenterCrop(10),
        >>>     transforms.ToTensor(),
        >>> ])
    """

    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, img):
        for t in self.transforms:
            img = t(img)
        return img


class ToTensor(object):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.

    Converts a PIL Image or numpy.ndarray (H x W x C) in the range
    [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].
    """

    def __call__(self, pic):
        """
        Args:
            pic (PIL Image or numpy.ndarray): Image to be converted to tensor.

        Returns:
            Tensor: Converted image.
        """
        return to_tensor(pic)


class ToPILImage(object):
    """Convert a tensor or an ndarray to PIL Image.

    Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape
    H x W x C to a PIL Image while preserving the value range.

    Args:
        mode (`PIL.Image mode`_): color space and pixel depth of input data (optional).
            If ``mode`` is ``None`` (default) there are some assumptions made about the input data:
            1. If the input has 3 channels, the ``mode`` is assumed to be ``RGB``.
            2. If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``.
            3. If the input has 1 channel, the ``mode`` is determined by the data type (i.e.
            ``int``, ``float``, ``short``).

    .. _PIL.Image mode: http://pillow.readthedocs.io/en/3.4.x/handbook/concepts.html#modes
    """
    def __init__(self, mode=None):
        self.mode = mode

    def __call__(self, pic):
        """
        Args:
            pic (Tensor or numpy.ndarray): Image to be converted to PIL Image.

        Returns:
            PIL Image: Image converted to PIL Image.

        """
        return to_pil_image(pic, self.mode)


class Normalize(object):
    """Normalize an tensor image with mean and standard deviation.
    Given mean: ``(M1,...,Mn)`` and std: ``(S1,...,Sn)`` for ``n`` channels, this transform
    will normalize each channel of the input ``torch.*Tensor`` i.e.
    ``input[channel] = (input[channel] - mean[channel]) / std[channel]``

    Args:
        mean (sequence): Sequence of means for each channel.
        std (sequence): Sequence of standard deviations for each channel.
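
    Example (an illustrative sketch; the values are commonly used ImageNet
    statistics and are an assumption about your data, not a requirement):
        >>> transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])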
    """

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be normalized.

        Returns:
            Tensor: Normalized Tensor image.
        """
        return normalize(tensor, self.mean, self.std)


class Resize(object):
    """Resize the input PIL Image to the given size.

    Args:
        size (sequence or int): Desired output size. If size is a sequence like
            (h, w), output size will be matched to this. If size is an int,
            smaller edge of the image will be matched to this number.
            i.e., if height > width, then the image will be rescaled to
            (size * height / width, size)
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        assert isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)
        self.size = size
        self.interpolation = interpolation

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be scaled.

        Returns:
            PIL Image: Rescaled image.
        """
        return resize(img, self.size, self.interpolation)


class Scale(Resize):
    """
    Note: This transform is deprecated in favor of Resize.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn("The use of the transforms.Scale transform is deprecated, " +
                      "please use transforms.Resize instead.")
        super(Scale, self).__init__(*args, **kwargs)


class CenterCrop(object):
    """Crops the given PIL Image at the center.

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size

    @staticmethod
    def get_params(img, output_size):
        """Get parameters for ``crop`` for center crop.

        Args:
            img (PIL Image): Image to be cropped.
            output_size (tuple): Expected output size of the crop.

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for center crop.
        """
        w, h = img.size
        th, tw = output_size
        i = int(round((h - th) / 2.))
        j = int(round((w - tw) / 2.))
        return i, j, th, tw

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be cropped.

        Returns:
            PIL Image: Cropped image.
        """
        i, j, h, w = self.get_params(img, self.size)
        return crop(img, i, j, h, w)


class Pad(object):
    """Pad the given PIL Image on all sides with the given "pad" value.

    Args:
        padding (int or tuple): Padding on each border. If a single int is provided this
            is used to pad all borders. If a tuple of length 2 is provided this is the padding
            on left/right and top/bottom respectively. If a tuple of length 4 is provided
            this is the padding for the left, top, right and bottom borders
            respectively.
        fill: Pixel fill value. Default is 0. If a tuple of
            length 3, it is used to fill R, G, B channels respectively.
    """

    def __init__(self, padding, fill=0):
        assert isinstance(padding, (numbers.Number, tuple))
        assert isinstance(fill, (numbers.Number, str, tuple))
        if isinstance(padding, collections.Sequence) and len(padding) not in [2, 4]:
            raise ValueError("Padding must be an int or a 2, or 4 element tuple, not a " +
                             "{} element tuple".format(len(padding)))

        self.padding = padding
        self.fill = fill

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be padded.

        Returns:
            PIL Image: Padded image.
        """
        return pad(img, self.padding, self.fill)


class Lambda(object):
    """Apply a user-defined lambda as a transform.

    Args:
        lambd (function): Lambda/function to be used for transform.
    """

    def __init__(self, lambd):
        assert isinstance(lambd, types.LambdaType)
        self.lambd = lambd

    def __call__(self, img):
        return self.lambd(img)


class RandomCrop(object):
    """Crop the given PIL Image at a random location.

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
        padding (int or sequence, optional): Optional padding on each border
            of the image. Default is 0, i.e. no padding. If a sequence of length
            4 is provided, it is used to pad left, top, right, bottom borders
            respectively.
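
    Example (an illustrative sketch; pad-then-crop is a common CIFAR-style augmentation):
        >>> transforms.RandomCrop(32, padding=4)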
    """

    def __init__(self, size, padding=0):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            self.size = size
        self.padding = padding

    @staticmethod
    def get_params(img, output_size):
        """Get parameters for ``crop`` for a random crop.

        Args:
            img (PIL Image): Image to be cropped.
            output_size (tuple): Expected output size of the crop.

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
        """
        w, h = img.size
        th, tw = output_size
        if w == tw and h == th:
            return 0, 0, h, w

        i = random.randint(0, h - th)
        j = random.randint(0, w - tw)
        return i, j, th, tw

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be cropped.

        Returns:
            PIL Image: Cropped image.
        """
        if self.padding > 0:
            img = pad(img, self.padding)

        i, j, h, w = self.get_params(img, self.size)

        return crop(img, i, j, h, w)


class RandomHorizontalFlip(object):
    """Horizontally flip the given PIL Image randomly with a probability of 0.5."""

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be flipped.

        Returns:
            PIL Image: Randomly flipped image.
        """
        if random.random() < 0.5:
            return hflip(img)
        return img


class RandomVerticalFlip(object):
    """Vertically flip the given PIL Image randomly with a probability of 0.5."""

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be flipped.

        Returns:
            PIL Image: Randomly flipped image.
        """
        if random.random() < 0.5:
            return vflip(img)
        return img


class RandomResizedCrop(object):
    """Crop the given PIL Image to random size and aspect ratio.

    A crop of random size of (0.08 to 1.0) of the original size and a random
    aspect ratio of 3/4 to 4/3 of the original aspect ratio is made. This crop
    is finally resized to given size.
    This is popularly used to train the Inception networks.

    Args:
        size: expected output size of each edge
        interpolation: Default: PIL.Image.BILINEAR
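
    Example (an illustrative sketch; the Inception-style training crop):
        >>> rrc = RandomResizedCrop(224)
        >>> rrc(img).size  # always (224, 224) for a PIL image ``img``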
    """

    def __init__(self, size, interpolation=Image.BILINEAR):
        self.size = (size, size)
        self.interpolation = interpolation

    @staticmethod
    def get_params(img):
        """Get parameters for ``crop`` for a random sized crop.

        Args:
            img (PIL Image): Image to be cropped.

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for a random
                sized crop.
        """
        for attempt in range(10):
            area = img.size[0] * img.size[1]
            target_area = random.uniform(0.08, 1.0) * area
            aspect_ratio = random.uniform(3. / 4, 4. / 3)

            w = int(round(math.sqrt(target_area * aspect_ratio)))
            h = int(round(math.sqrt(target_area / aspect_ratio)))

            if random.random() < 0.5:
                w, h = h, w

            if w <= img.size[0] and h <= img.size[1]:
                i = random.randint(0, img.size[1] - h)
                j = random.randint(0, img.size[0] - w)
                return i, j, h, w

        # Fallback
        w = min(img.size[0], img.size[1])
        i = (img.size[1] - w) // 2
        j = (img.size[0] - w) // 2
        return i, j, w, w

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Image to be cropped and resized.

        Returns:
            PIL Image: Randomly cropped and resized image.
        """
        i, j, h, w = self.get_params(img)
        return resized_crop(img, i, j, h, w, self.size, self.interpolation)


class RandomSizedCrop(RandomResizedCrop):
    """
    Note: This transform is deprecated in favor of RandomResizedCrop.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn("The use of the transforms.RandomSizedCrop transform is deprecated, " +
                      "please use transforms.RandomResizedCrop instead.")
        super(RandomSizedCrop, self).__init__(*args, **kwargs)


class FiveCrop(object):
    """Crop the given PIL Image into four corners and the central crop.abs

    Note: this transform returns a tuple of images and there may be a mismatch in the number of
    inputs and targets your `Dataset` returns.

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
    """

    def __init__(self, size):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
            self.size = size

    def __call__(self, img):
        return five_crop(img, self.size)


class TenCrop(object):
    """Crop the given PIL Image into four corners and the central crop plus the
       flipped version of these (horizontal flipping is used by default)

    Note: this transform returns a tuple of images and there may be a mismatch in the number of
    inputs and targets your `Dataset` returns.

    Args:
        size (sequence or int): Desired output size of the crop. If size is an
            int instead of sequence like (h, w), a square crop (size, size) is
            made.
        vertical_flip (bool): Use vertical flipping instead of horizontal.
    """

    def __init__(self, size, vertical_flip=False):
        if isinstance(size, numbers.Number):
            self.size = (int(size), int(size))
        else:
            assert len(size) == 2, "Please provide only two dimensions (h, w) for size."
            self.size = size
        self.vertical_flip = vertical_flip

    def __call__(self, img):
        return ten_crop(img, self.size, self.vertical_flip)


class LinearTransformation(object):
    """Transform a tensor image with a square transformation matrix computed
    offline.

    Given transformation_matrix, will flatten the torch.*Tensor, compute the dot
    product with the transformation matrix and reshape the tensor to its
    original shape.

    Applications:
    - whitening: zero-center the data, compute the data covariance matrix
                 [D x D] with np.dot(X.T, X), perform SVD on this matrix and
                 pass it as transformation_matrix.

    Args:
        transformation_matrix (Tensor): tensor [D x D], D = C x H x W
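
    Example (an illustrative sketch of the shapes involved; the identity matrix
    is a stand-in for a real precomputed whitening matrix):
        >>> t = torch.rand(3, 8, 8)                        # C x H x W, so D = 192
        >>> out = LinearTransformation(torch.eye(192))(t)  # same values back, shape (3, 8, 8)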
    """

    def __init__(self, transformation_matrix):
        if transformation_matrix.size(0) != transformation_matrix.size(1):
            raise ValueError("transformation_matrix should be square. Got " +
                             "[{} x {}] rectangular matrix.".format(*transformation_matrix.size()))
        self.transformation_matrix = transformation_matrix

    def __call__(self, tensor):
        """
        Args:
            tensor (Tensor): Tensor image of size (C, H, W) to be whitened.

        Returns:
            Tensor: Transformed image.
        """
        if tensor.size(0) * tensor.size(1) * tensor.size(2) != self.transformation_matrix.size(0):
            raise ValueError("tensor and transformation matrix have incompatible shape. " +
                             "[{} x {} x {}] != ".format(*tensor.size()) +
                             "{}".format(self.transformation_matrix.size(0)))
        flat_tensor = tensor.view(1, -1)
        transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix)
        tensor = transformed_tensor.view(tensor.size())
        return tensor


class ColorJitter(object):
    """Randomly change the brightness, contrast and saturation of an image.

    Args:
        brightness (float): How much to jitter brightness. brightness_factor
            is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].
        contrast (float): How much to jitter contrast. contrast_factor
            is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].
        saturation (float): How much to jitter saturation. saturation_factor
            is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].
        hue (float): How much to jitter hue. hue_factor is chosen uniformly from
            [-hue, hue]. Should be >= 0 and <= 0.5.
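
    Example (an illustrative sketch; assumes a PIL image ``img``):
        >>> jitter = ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
        >>> out = jitter(img)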
    """
    def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
        self.brightness = brightness
        self.contrast = contrast
        self.saturation = saturation
        self.hue = hue

    @staticmethod
    def get_params(brightness, contrast, saturation, hue):
        """Get a randomized transform to be applied on image.

        Arguments are same as that of __init__.

        Returns:
            Transform which randomly adjusts brightness, contrast and
            saturation in a random order.
        """
        transforms = []
        if brightness > 0:
            brightness_factor = np.random.uniform(max(0, 1 - brightness), 1 + brightness)
            transforms.append(Lambda(lambda img: adjust_brightness(img, brightness_factor)))

        if contrast > 0:
            contrast_factor = np.random.uniform(max(0, 1 - contrast), 1 + contrast)
            transforms.append(Lambda(lambda img: adjust_contrast(img, contrast_factor)))

        if saturation > 0:
            saturation_factor = np.random.uniform(max(0, 1 - saturation), 1 + saturation)
            transforms.append(Lambda(lambda img: adjust_saturation(img, saturation_factor)))

        if hue > 0:
            hue_factor = np.random.uniform(-hue, hue)
            transforms.append(Lambda(lambda img: adjust_hue(img, hue_factor)))

        np.random.shuffle(transforms)
        transform = Compose(transforms)

        return transform

    def __call__(self, img):
        """
        Args:
            img (PIL Image): Input image.

        Returns:
            PIL Image: Color jittered image.
        """
        transform = self.get_params(self.brightness, self.contrast,
                                    self.saturation, self.hue)
        return transform(img)