Commit 82bcd1db authored by Ben Graham, committed by GitHub

Merge pull request #9 from gnedster/batchwise_in_tensor

Add BatchwiseDropoutInTensor Layer
parents 9c865087 898f6b62
@@ -11,7 +11,7 @@ from ..sparseConvNetTensor import SparseConvNetTensor
 from .sparseModule import SparseModule
 from .averagePooling import AveragePooling
 from .batchNormalization import BatchNormalization, BatchNormReLU, BatchNormLeakyReLU, BatchNormalizationInTensor
-from .batchwiseDropout import BatchwiseDropout
+from .batchwiseDropout import BatchwiseDropout, BatchwiseDropoutInTensor
 from .concatTable import ConcatTable
 from .convolution import Convolution
 from .cAddTable import CAddTable
...
@@ -33,6 +33,7 @@ class BatchwiseDropout(SparseModule):
         self.p = p
         self.leakiness = leaky
         self.noise = torch.Tensor(nPlanes)
+        self.nPlanes = nPlanes
         self.output = None if ip else SparseConvNetTensor(torch.Tensor())
         self.gradInput = None if ip else torch.Tensor()
@@ -74,7 +75,7 @@ class BatchwiseDropout(SparseModule):
         if not self.inplace:
             self.output.features.type(t)
-            self.gradInput.features.type(t)
+            self.gradInput.type(t)
         SparseModule.type(self, t, tensorCache)
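The type() change lines up with the constructor shown earlier: in non-inplace mode self.gradInput is created as a bare torch.Tensor rather than a SparseConvNetTensor, so it has no .features attribute and the old self.gradInput.features.type(t) call could not work. A small illustrative sketch (plain PyTorch, not part of the diff):

import torch

grad_input = torch.Tensor()                        # as in the non-inplace branch of __init__
converted = grad_input.type('torch.DoubleTensor')  # OK: plain tensors expose .type()
# grad_input.features                              # AttributeError: 'Tensor' object has no attribute 'features'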
@@ -92,3 +93,55 @@ class BatchwiseDropout(SparseModule):
             s = s + ',leakiness=' + str(self.leakiness)
         s = s + ')'
         return s
+
+
+class BatchwiseDropoutInTensor(BatchwiseDropout):
+    def __init__(
+            self,
+            nPlanes,
+            p,
+            output_column_offset=0,
+            leaky=1):
+        BatchwiseDropout.__init__(self, nPlanes, p, False, leaky)
+        self.output_column_offset = output_column_offset
+
+    def updateOutput(self, input):
+        if self.train:
+            self.noise.bernoulli_(1 - self.p)
+        else:
+            self.noise.fill_(1 - self.p)
+        self.output.metadata = input.metadata
+        self.output.spatial_size = input.spatial_size
+        o = self.output.features.narrow(
+            1, self.output_column_offset, self.nPlanes)
+        typed_fn(input.features, 'BatchwiseMultiplicativeDropout_updateOutput')(
+            input.features,
+            o,
+            self.noise,
+            self.leakiness
+        )
+        return self.output
+
+    def updateGradInput(self, input, gradOutput):
+        assert self.train
+        d_o = gradOutput.narrow(1, self.output_column_offset, self.nPlanes)
+        typed_fn(input.features, 'BatchwiseMultiplicativeDropout_updateGradInput')(
+            input.features,
+            self.gradInput,
+            d_o,
+            self.noise,
+            self.leakiness
+        )
+        return self.gradInput
+
+    def __repr__(self):
+        s = 'BatchwiseDropoutInTensor(' + str(self.nPlanes) + ',p=' + str(self.p) + \
+            ',column_offset=' + str(self.output_column_offset)
+        if self.leakiness > 0:
+            s = s + ',leakiness=' + str(self.leakiness)
+        s = s + ')'
+        return s
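BatchwiseDropoutInTensor writes its result into a column slice of a pre-allocated features tensor via narrow, so several layers can share one wide output tensor at different column offsets. A minimal plain-PyTorch sketch of the slice-write semantics the layer relies on (shapes and names here are illustrative, not taken from the diff):

import torch

# Shared output tensor with 24 feature planes; a layer such as
# BatchwiseDropoutInTensor would own columns [offset, offset + nPlanes).
features = torch.zeros(4, 24)
offset, nPlanes = 8, 16

# narrow(1, offset, nPlanes) returns a view, so writes land directly in
# `features`, mirroring the `o = self.output.features.narrow(...)` call above.
o = features.narrow(1, offset, nPlanes)
o.copy_(torch.randn(4, nPlanes))
assert features[:, offset:offset + nPlanes].equal(o)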
@@ -10,7 +10,6 @@ from . import SparseModule
 from ..utils import toLongTensor, dim_typed_fn, optionalTensor, nullptr
 from ..sparseConvNetTensor import SparseConvNetTensor
 class ValidConvolution(SparseModule):
     def __init__(self, dimension, nIn, nOut, filter_size, bias):
         SparseModule.__init__(self)
@@ -51,6 +50,7 @@ class ValidConvolution(SparseModule):
     def backward(self, input, gradOutput, scale=1):
         assert scale == 1
         dim_typed_fn(self.dimension, input.features, 'ValidConvolution_backward')(
             input.spatial_size,
             self.filter_size,
...