Commit cb129912 authored by Benjamin Thomas Graham's avatar Benjamin Thomas Graham
Browse files

tidy

parent 5a42d7a9
...@@ -9,6 +9,60 @@ from torch.nn import Module ...@@ -9,6 +9,60 @@ from torch.nn import Module
from .utils import * from .utils import *
from .sparseConvNetTensor import SparseConvNetTensor from .sparseConvNetTensor import SparseConvNetTensor
class AveragePooling(Module):
    """
    Average Pooling for SparseConvNetTensors.
    Parameters:
    dimension i.e. 3
    pool_size i.e. 3 or [3,3,3]
    pool_stride i.e. 2 or [2,2,2]
    nFeaturesToDrop : number of leading feature planes excluded from pooling
    """
    def __init__(self, dimension, pool_size, pool_stride, nFeaturesToDrop=0):
        super(AveragePooling, self).__init__()
        self.dimension = dimension
        # toLongTensor broadcasts a scalar (e.g. 3) to a per-axis LongTensor.
        self.pool_size = toLongTensor(dimension, pool_size)
        self.pool_stride = toLongTensor(dimension, pool_stride)
        self.nFeaturesToDrop = nFeaturesToDrop

    def forward(self, input):
        output = SparseConvNetTensor()
        output.metadata = input.metadata
        # Integer (floor) division: spatial sizes are LongTensors. Using //
        # keeps the result a LongTensor on modern PyTorch, where / would
        # produce floats.
        output.spatial_size = (
            input.spatial_size - self.pool_size) // self.pool_stride + 1
        # The pooling must tile the input exactly; otherwise sizes are invalid.
        assert ((output.spatial_size - 1) * self.pool_stride +
                self.pool_size == input.spatial_size).all()
        output.features = AveragePoolingFunction.apply(
            input.features,
            input.metadata,
            input.spatial_size,
            output.spatial_size,
            self.dimension,
            self.pool_size,
            self.pool_stride,
            self.nFeaturesToDrop)
        return output

    def input_spatial_size(self, out_size):
        # Inverse of the size formula used in forward().
        return (out_size - 1) * self.pool_stride + self.pool_size

    def __repr__(self):
        s = 'AveragePooling'
        if self.pool_size.max().item() == self.pool_size.min().item() and\
                self.pool_stride.max().item() == self.pool_stride.min().item():
            s = s + str(self.pool_size[0].item()) + \
                '/' + str(self.pool_stride[0].item())
        else:
            s = s + '(' + str(self.pool_size[0].item())
            for i in self.pool_size[1:]:
                s = s + ',' + str(i.item())
            s = s + ')/(' + str(self.pool_stride[0].item())
            for i in self.pool_stride[1:]:
                s = s + ',' + str(i.item())
            s = s + ')'
        if self.nFeaturesToDrop > 0:
            # Bug fix: nFeaturesToDrop is an int and must be stringified
            # before concatenation (previously raised TypeError).
            s = s + ' nFeaturesToDrop = ' + str(self.nFeaturesToDrop)
        return s
class AveragePoolingFunction(Function): class AveragePoolingFunction(Function):
@staticmethod @staticmethod
...@@ -68,52 +122,3 @@ class AveragePoolingFunction(Function): ...@@ -68,52 +122,3 @@ class AveragePoolingFunction(Function):
ctx.nFeaturesToDrop, ctx.nFeaturesToDrop,
torch.cuda.IntTensor() if input_features.is_cuda else nullptr) torch.cuda.IntTensor() if input_features.is_cuda else nullptr)
return grad_input, None, None, None, None, None, None, None return grad_input, None, None, None, None, None, None, None
class AveragePooling(Module):
    """
    Average Pooling for SparseConvNetTensors.
    Parameters:
    dimension i.e. 3
    pool_size i.e. 3 or [3,3,3]
    pool_stride i.e. 2 or [2,2,2]
    nFeaturesToDrop : number of leading feature planes excluded from pooling
    """
    def __init__(self, dimension, pool_size, pool_stride, nFeaturesToDrop=0):
        super(AveragePooling, self).__init__()
        self.dimension = dimension
        # toLongTensor broadcasts a scalar (e.g. 3) to a per-axis LongTensor.
        self.pool_size = toLongTensor(dimension, pool_size)
        self.pool_stride = toLongTensor(dimension, pool_stride)
        self.nFeaturesToDrop = nFeaturesToDrop

    def forward(self, input):
        output = SparseConvNetTensor()
        output.metadata = input.metadata
        # Integer (floor) division: spatial sizes are LongTensors. Using //
        # keeps the result a LongTensor on modern PyTorch, where / would
        # produce floats.
        output.spatial_size = (
            input.spatial_size - self.pool_size) // self.pool_stride + 1
        # The pooling must tile the input exactly; otherwise sizes are invalid.
        assert ((output.spatial_size - 1) * self.pool_stride +
                self.pool_size == input.spatial_size).all()
        output.features = AveragePoolingFunction.apply(
            input.features,
            input.metadata,
            input.spatial_size,
            output.spatial_size,
            self.dimension,
            self.pool_size,
            self.pool_stride,
            self.nFeaturesToDrop)
        return output

    def input_spatial_size(self, out_size):
        # Inverse of the size formula used in forward().
        return (out_size - 1) * self.pool_stride + self.pool_size

    def __repr__(self):
        s = 'AveragePooling'
        if self.pool_size.max().item() == self.pool_size.min().item() and\
                self.pool_stride.max().item() == self.pool_stride.min().item():
            s = s + str(self.pool_size[0].item()) + \
                '/' + str(self.pool_stride[0].item())
        else:
            s = s + '(' + str(self.pool_size[0].item())
            for i in self.pool_size[1:]:
                s = s + ',' + str(i.item())
            s = s + ')/(' + str(self.pool_stride[0].item())
            for i in self.pool_stride[1:]:
                s = s + ',' + str(i.item())
            s = s + ')'
        if self.nFeaturesToDrop > 0:
            # Bug fix: nFeaturesToDrop is an int and must be stringified
            # before concatenation (previously raised TypeError).
            s = s + ' nFeaturesToDrop = ' + str(self.nFeaturesToDrop)
        return s
...@@ -4,22 +4,92 @@ ...@@ -4,22 +4,92 @@
# This source code is licensed under the license found in the # This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree. # LICENSE file in the root directory of this source tree.
"""
Parameters:
nPlanes : number of input planes
eps : small number used to stabilise standard deviation calculation
momentum : for calculating running average for testing (default 0.9)
affine : only 'true' is supported at present (default 'true')
noise : add multiplicative and additive noise during training if >0.
leakiness : Apply an activation function in-place: 0<=leakiness<=1.
0 for ReLU, values in (0,1) for LeakyReLU, 1 for no activation function.
"""
from torch.autograd import Function from torch.autograd import Function
from torch.nn import Module, Parameter from torch.nn import Module, Parameter
from .utils import * from .utils import *
from .sparseConvNetTensor import SparseConvNetTensor from .sparseConvNetTensor import SparseConvNetTensor
class BatchNormalization(Module):
    """
    Batch normalization for SparseConvNetTensors.

    Parameters:
    nPlanes : number of input planes
    eps : small number used to stabilise standard deviation calculation
    momentum : for calculating running average for testing (default 0.9)
    affine : only 'True' is supported at present (default 'True')
    leakiness : apply an activation function in-place: 0<=leakiness<=1.
        0 for ReLU, values in (0,1) for LeakyReLU, 1 for no activation.
    """

    def __init__(self, nPlanes, eps=1e-4, momentum=0.9, affine=True,
                 leakiness=1):
        super(BatchNormalization, self).__init__()
        self.nPlanes = nPlanes
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.leakiness = leakiness
        # Running statistics, updated in training and used at test time.
        self.register_buffer("runningMean", torch.zeros(nPlanes))
        self.register_buffer("runningVar", torch.ones(nPlanes))
        if affine:
            # Learnable per-plane scale and shift.
            self.weight = Parameter(torch.ones(nPlanes))
            self.bias = Parameter(torch.zeros(nPlanes))
        else:
            self.weight = None
            self.bias = None

    def forward(self, input):
        # Feature count must match nPlanes (empty feature tensors pass).
        assert input.features.ndimension() == 0 or input.features.size(1) == self.nPlanes
        out = SparseConvNetTensor()
        out.metadata = input.metadata
        out.spatial_size = input.spatial_size
        out.features = BatchNormalizationFunction.apply(
            input.features,
            self.weight,
            self.bias,
            self.runningMean,
            self.runningVar,
            self.eps,
            self.momentum,
            self.training,
            self.leakiness)
        return out

    def input_spatial_size(self, out_size):
        # Batch norm is size-preserving.
        return out_size

    def __repr__(self):
        s = (f'BatchNorm({self.nPlanes},eps={self.eps},'
             f'momentum={self.momentum},affine={self.affine}')
        if self.leakiness > 0:
            s += f',leakiness={self.leakiness}'
        return s + ')'
class BatchNormReLU(BatchNormalization):
    """BatchNormalization with a fused ReLU activation (leakiness=0)."""

    def __init__(self, nPlanes, eps=1e-4, momentum=0.9):
        super(BatchNormReLU, self).__init__(nPlanes, eps, momentum, True, 0)

    def __repr__(self):
        return (f'BatchNormReLU({self.nPlanes},eps={self.eps},'
                f'momentum={self.momentum},affine={self.affine})')
class BatchNormLeakyReLU(BatchNormalization):
    """BatchNormalization with a fused LeakyReLU activation (leakiness=0.333)."""

    def __init__(self, nPlanes, eps=1e-4, momentum=0.9):
        BatchNormalization.__init__(self, nPlanes, eps, momentum, True, 0.333)

    def __repr__(self):
        # Bug fix: __repr__ previously printed 'BatchNormReLU', copied from
        # the sibling class, misreporting this module's type.
        s = 'BatchNormLeakyReLU(' + str(self.nPlanes) + ',eps=' + str(self.eps) + \
            ',momentum=' + str(self.momentum) + ',affine=' + str(self.affine) + ')'
        return s
class BatchNormalizationFunction(Function): class BatchNormalizationFunction(Function):
@staticmethod @staticmethod
...@@ -98,75 +168,3 @@ class BatchNormalizationFunction(Function): ...@@ -98,75 +168,3 @@ class BatchNormalizationFunction(Function):
grad_bias.data if grad_bias is not None else nullptr, grad_bias.data if grad_bias is not None else nullptr,
ctx.leakiness) ctx.leakiness)
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
class BatchNormalization(Module):
    """
    Batch normalization for SparseConvNetTensors, with an optional fused
    (leaky) ReLU controlled by `leakiness` (0=ReLU, 1=no activation).
    """

    def __init__(self, nPlanes, eps=1e-4, momentum=0.9, affine=True,
                 leakiness=1):
        super(BatchNormalization, self).__init__()
        self.nPlanes = nPlanes
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.leakiness = leakiness
        # Running statistics, updated in training and used at test time.
        self.register_buffer("runningMean", torch.zeros(nPlanes))
        self.register_buffer("runningVar", torch.ones(nPlanes))
        if affine:
            # Learnable per-plane scale and shift.
            self.weight = Parameter(torch.ones(nPlanes))
            self.bias = Parameter(torch.zeros(nPlanes))
        else:
            self.weight = None
            self.bias = None

    def forward(self, input):
        # Feature count must match nPlanes (empty feature tensors pass).
        assert input.features.ndimension() == 0 or input.features.size(1) == self.nPlanes
        out = SparseConvNetTensor()
        out.metadata = input.metadata
        out.spatial_size = input.spatial_size
        out.features = BatchNormalizationFunction.apply(
            input.features,
            self.weight,
            self.bias,
            self.runningMean,
            self.runningVar,
            self.eps,
            self.momentum,
            self.training,
            self.leakiness)
        return out

    def input_spatial_size(self, out_size):
        # Batch norm is size-preserving.
        return out_size

    def __repr__(self):
        s = (f'BatchNorm({self.nPlanes},eps={self.eps},'
             f'momentum={self.momentum},affine={self.affine}')
        if self.leakiness > 0:
            s += f',leakiness={self.leakiness}'
        return s + ')'
class BatchNormReLU(BatchNormalization):
    """BatchNormalization with a fused ReLU activation (leakiness=0)."""

    def __init__(self, nPlanes, eps=1e-4, momentum=0.9):
        super(BatchNormReLU, self).__init__(nPlanes, eps, momentum, True, 0)

    def __repr__(self):
        return (f'BatchNormReLU({self.nPlanes},eps={self.eps},'
                f'momentum={self.momentum},affine={self.affine})')
class BatchNormLeakyReLU(BatchNormalization):
    """BatchNormalization with a fused LeakyReLU activation (leakiness=0.333)."""

    def __init__(self, nPlanes, eps=1e-4, momentum=0.9):
        BatchNormalization.__init__(self, nPlanes, eps, momentum, True, 0.333)

    def __repr__(self):
        # Bug fix: __repr__ previously printed 'BatchNormReLU', copied from
        # the sibling class, misreporting this module's type.
        s = 'BatchNormLeakyReLU(' + str(self.nPlanes) + ',eps=' + str(self.eps) + \
            ',momentum=' + str(self.momentum) + ',affine=' + str(self.affine) + ')'
        return s
...@@ -13,12 +13,12 @@ from .metadata import Metadata ...@@ -13,12 +13,12 @@ from .metadata import Metadata
class InputLayer(Module): class InputLayer(Module):
""" """
Takes a tuple (coords, features, batch_size [optional]) Takes a tuple (coords, features, batch_size [optional])
* coords is 2d with size * coords is 2d torch.LongTensor with size
N x dimension (batch size == 1) N x dimension (batch size == 1)
or or
N x (dimension+1) (first d columns are coordinates, last column is batch index) N x (dimension+1) (first d columns are coordinates, last column is batch index)
* features is a tensor with size * features is a CPU or CUDA float tensor with size
N x n_feature_planes N x n_feature_planes
...@@ -49,7 +49,7 @@ class InputLayer(Module): ...@@ -49,7 +49,7 @@ class InputLayer(Module):
self.dimension, self.dimension,
output.metadata, output.metadata,
self.spatial_size, self.spatial_size,
input[0], input[0].type(torch.LongTensor),
input[1], input[1],
0 if len(input) == 2 else input[2], 0 if len(input) == 2 else input[2],
self.mode self.mode
...@@ -60,12 +60,12 @@ class InputLayer(Module): ...@@ -60,12 +60,12 @@ class InputLayer(Module):
class BLInputLayer(Module): class BLInputLayer(Module):
""" """
Takes a tuple (coords, features) Takes a tuple (coords, features)
* coords is 3d LongTensor with size * coords is 3d torch.LongTensor with size
batch_size x length x dimension batch_size x length x dimension
Coordinates should be >=0, or -1 to indicate 'empty' Coordinates should be >=0, or -1 to indicate 'empty'
* features is a 3d float Tensor with size * features is a 3d CPU or CUDA float Tensor with size
batch_size x length x n_feature_planes batch_size x length x n_feature_planes
...@@ -93,7 +93,7 @@ class BLInputLayer(Module): ...@@ -93,7 +93,7 @@ class BLInputLayer(Module):
self.dimension, self.dimension,
output.metadata, output.metadata,
self.spatial_size, self.spatial_size,
input[0], input[0].type(torch.LongTensor),
input[1], input[1],
self.mode self.mode
) )
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment