Commit 98b9aa54 authored by Soumith Chintala's avatar Soumith Chintala Committed by GitHub
Browse files

Merge pull request #8 from pytorch/lambda

adding lambda transform
parents 7ab42043 6b175db5
...@@ -194,7 +194,6 @@ This is popularly used to train the Inception networks ...@@ -194,7 +194,6 @@ This is popularly used to train the Inception networks
- size: size of the smaller edge - size: size of the smaller edge
- interpolation: Default: PIL.Image.BILINEAR - interpolation: Default: PIL.Image.BILINEAR
### `Pad(padding, fill=0)` ### `Pad(padding, fill=0)`
Pads the given image on each side with `padding` number of pixels, and the padding pixels are filled with Pads the given image on each side with `padding` number of pixels, and the padding pixels are filled with
pixel value `fill`. pixel value `fill`.
...@@ -209,6 +208,14 @@ Given mean: (R, G, B) and std: (R, G, B), will normalize each channel of the tor ...@@ -209,6 +208,14 @@ Given mean: (R, G, B) and std: (R, G, B), will normalize each channel of the tor
- `ToTensor()` - Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] - `ToTensor()` - Converts a PIL.Image (RGB) or numpy.ndarray (H x W x C) in the range [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0]
- `ToPILImage()` - Converts a torch.*Tensor of range [0, 1] and shape C x H x W or numpy ndarray of dtype=uint8, range[0, 255] and shape H x W x C to a PIL.Image of range [0, 255] - `ToPILImage()` - Converts a torch.*Tensor of range [0, 1] and shape C x H x W or numpy ndarray of dtype=uint8, range[0, 255] and shape H x W x C to a PIL.Image of range [0, 255]
## Generic Transforms
### `Lambda(lambd)`
Given a Python lambda, applies it to the input `img` and returns it.
For example:
```python
transforms.Lambda(lambda x: x.add(10))
```
# Utils # Utils
......
...@@ -100,10 +100,19 @@ class Tester(unittest.TestCase): ...@@ -100,10 +100,19 @@ class Tester(unittest.TestCase):
transforms.Pad(padding), transforms.Pad(padding),
transforms.ToTensor(), transforms.ToTensor(),
])(img) ])(img)
print(height, width, padding)
print(result.size(1), result.size(2))
assert result.size(1) == height + 2*padding assert result.size(1) == height + 2*padding
assert result.size(2) == width + 2*padding assert result.size(2) == width + 2*padding
def test_lambda(self):
trans = transforms.Lambda(lambda x: x.add(10))
x = torch.randn(10)
y = trans(x)
assert(y.equal(torch.add(x, 10)))
trans = transforms.Lambda(lambda x: x.add_(10))
x = torch.randn(10)
y = trans(x)
assert(y.equal(x))
if __name__ == '__main__': if __name__ == '__main__':
......
...@@ -5,6 +5,7 @@ import random ...@@ -5,6 +5,7 @@ import random
from PIL import Image, ImageOps from PIL import Image, ImageOps
import numpy as np import numpy as np
import numbers import numbers
import types
class Compose(object): class Compose(object):
""" Composes several transforms together. """ Composes several transforms together.
...@@ -126,6 +127,15 @@ class Pad(object): ...@@ -126,6 +127,15 @@ class Pad(object):
def __call__(self, img): def __call__(self, img):
return ImageOps.expand(img, border=self.padding, fill=self.fill) return ImageOps.expand(img, border=self.padding, fill=self.fill)
class Lambda(object):
"""Applies a lambda as a transform"""
def __init__(self, lambd):
assert type(lambd) is types.LambdaType
self.lambd = lambd
def __call__(self, img):
return self.lambd(img)
class RandomCrop(object): class RandomCrop(object):
"""Crops the given PIL.Image at a random location to have a region of """Crops the given PIL.Image at a random location to have a region of
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment