Commit 5a42d7a9 authored by Benjamin Thomas Graham

tidy

parent c54569a8
@@ -17,8 +17,8 @@ extern "C" void scn_DR_(SparseToDense_updateOutput)(
   {
     long sz[Dimension + 2];
-    sz[0] = _m.grids.begin()->second.size();
-    sz[1] = nPlanes; // input_features->size[1];
+    sz[0] = _m.grids.begin()->second.size(); //batch size
+    sz[1] = nPlanes;
     std::memcpy(sz + 2, THLongTensor_data(inputSize), sizeof(long) * Dimension);
     THTensor_(resizeNd)(output_features, Dimension + 2, sz, NULL);
     THTensor_(zero)(output_features);
@@ -45,13 +45,12 @@ extern "C" void scn_DR_(SparseToDense_updateGradInput)(
   SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
   THTensor_(resizeAs)(d_input_features, input_features);
   THTensor_(zero)(d_input_features);
-  auto _rules = _m.getSparseToDenseRuleBook(inputSize, true);
   if (input_features->nDimension == 2) {
+    auto _rules = _m.getSparseToDenseRuleBook(inputSize, true);
     long spatialVolume = THLongTensor_prodall(inputSize);
     uInt _nPlanes = d_input_features->size[1];
     auto diF = THTensor_(data)(d_input_features);
     auto doF = THTensor_(data)(d_output_features);
     for (auto &r : _rules) {
       uInt nHot = r.size() / 2;
       SparseToDense_BackwardPass<real>(diF, doF, _nPlanes, spatialVolume, &r[0],
...
@@ -27,7 +27,7 @@ void SparseToDense_BackwardPass(T *d_input_features, T *d_output_features,
   for (uInt outSite = 0; outSite < nHot; outSite++) {
     T *d_i = d_input_features + rules[2 * outSite] * nPlanes;
-    auto d_o = d_output_features + rules[2 * outSite + 1];
+    T *d_o = d_output_features + rules[2 * outSite + 1];
     for (uInt plane = 0; plane < nPlanes; plane++)
       d_i[plane] = d_o[plane * spatialVolume];
   }
...
@@ -14,16 +14,11 @@ extern "C" void scn_DR_(SparseToDense_updateOutput)(
     THCTensor *output_features, THCITensor *rulesBuffer, long nPlanes) {
   SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
-  long spatialVolume = 1;
   {
     long sz[Dimension + 2];
-    sz[0] = _m.grids.begin()->second.size();
-    sz[1] = nPlanes; // input_features->size[1];
-    for (int i = 0; i < Dimension; i++) {
-      auto x = THLongTensor_data(inputSize)[i];
-      sz[i + 2] = x;
-      spatialVolume *= x;
-    }
+    sz[0] = _m.grids.begin()->second.size(); //batch size
+    sz[1] = nPlanes;
+    std::memcpy(sz + 2, THLongTensor_data(inputSize), sizeof(long) * Dimension);
     THCTensor_(resizeNd)(state, output_features, Dimension + 2, sz, NULL);
     THCTensor_(zero)(state, output_features);
   }
@@ -32,6 +27,7 @@ extern "C" void scn_DR_(SparseToDense_updateOutput)(
   uInt _nPlanes = input_features->size[1];
   auto iF = THCTensor_(data)(state, input_features);
   auto oF = THCTensor_(data)(state, output_features);
+  long spatialVolume = THLongTensor_prodall(inputSize);
   RULEBOOKITERATOR(
       SparseToDense_ForwardPass<real>(THCState_getCurrentStream(state), iF,
                                       oF, _nPlanes, spatialVolume, rbB, nHotB);
...
@@ -49,35 +49,25 @@ void scn_8_batchAddSample(void **m){}
 void scn_9_batchAddSample(void **m){}
 void scn_10_batchAddSample(void **m){}
 void scn_1_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize){}
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize){}
 void scn_2_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize){}
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize){}
 void scn_3_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize){}
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize){}
 void scn_4_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize){}
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize){}
 void scn_5_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize){}
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize){}
 void scn_6_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize){}
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize){}
 void scn_7_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize){}
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize){}
 void scn_8_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize){}
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize){}
 void scn_9_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize){}
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize){}
 void scn_10_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize){}
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize){}
 void scn_1_freeMetadata(void **metadata){}
 void scn_2_freeMetadata(void **metadata){}
 void scn_3_freeMetadata(void **metadata){}
...
@@ -49,35 +49,25 @@ void scn_8_batchAddSample(void **m);
 void scn_9_batchAddSample(void **m);
 void scn_10_batchAddSample(void **m);
 void scn_1_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize);
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize);
 void scn_2_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize);
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize);
 void scn_3_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize);
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize);
 void scn_4_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize);
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize);
 void scn_5_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize);
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize);
 void scn_6_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize);
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize);
 void scn_7_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize);
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize);
 void scn_8_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize);
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize);
 void scn_9_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize);
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize);
 void scn_10_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize);
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize);
 void scn_1_freeMetadata(void **metadata);
 void scn_2_freeMetadata(void **metadata);
 void scn_3_freeMetadata(void **metadata);
...
@@ -84,8 +84,7 @@ void scn_DIMENSION_batchAddSample(void **m)""")
 dim_fn("""
 void scn_DIMENSION_createMetadataForDenseToSparse(
-    void **m, THLongTensor *spatialSize_, THLongTensor *pad, THLongTensor *nz,
-    long batchSize)""")
+    void **m, THLongTensor *spatialSize_, THLongTensor *nz, long batchSize)""")
 dim_fn("""
 void scn_DIMENSION_freeMetadata(void **metadata)""")
...
@@ -4,20 +4,40 @@
 # This source code is licensed under the license found in the
 # LICENSE file in the root directory of this source tree.
-"""
-Function to convert a Dense Input into a sparse input.
-If possible, avoid using this module; build the hidden layer using InputBatch.
-Parameters:
-dimension : of the input field
-"""
 from torch.autograd import Function
 from torch.nn import Module
 from .utils import *
 from .metadata import Metadata
 from .sparseConvNetTensor import SparseConvNetTensor
+class DenseToSparse(Module):
+    """
+    Function to convert a Dense Input into a sparse input.
+    If possible, avoid using this module; build the hidden layer using InputBatch.
+    Parameters:
+    dimension : of the input field
+    """
+    def __init__(self, dimension):
+        Module.__init__(self)
+        self.dimension = dimension
+    def forward(self, input):
+        output = SparseConvNetTensor()
+        output.metadata = Metadata(self.dimension)
+        output.spatial_size = torch.LongTensor(list(input.size()[2:]))
+        output.features = DenseToSparseFunction.apply(
+            input,
+            output.metadata,
+            output.spatial_size,
+            self.dimension)
+        return output
+    def __repr__(self):
+        return 'DenseToSparse(' + str(self.dimension) + ')'
+    def input_spatial_size(self, out_size):
+        return out_size
 class DenseToSparseFunction(Function):
     @staticmethod
@@ -30,51 +50,30 @@ class DenseToSparseFunction(Function):
         ctx.dimension = dimension
         aa = input.permute(
             *([0, ] + list(range(2, 2 + dimension)) + [1, ])).clone()
-        aas = aa.size()
+        ctx.aas = aa.size()
         nz = aa.abs().sum(dimension + 1).view(aa.size()[0:-1])
         s = torch.LongTensor(nz.stride()).view(1, dimension + 1)
         nz = nz.nonzero()
         s = s.type_as(nz)
         aa = aa.view(-1, input.size(1))
-        aas2 = aa.size()
+        ctx.aas2 = aa.size()
         r = (nz * s.expand_as(nz)).sum(1).view(-1)
-        output_features = aa.index_select(0, ctx.r)
+        output_features = aa.index_select(0, r)
         dim_fn(dimension, 'createMetadataForDenseToSparse')(
             output_metadata.ffi,
             output_spatial_size,
             nz.cpu(),
             input.size(0))
-        ctx.save_for_backwards(output_features, aas, aas2, r)
+        ctx.save_for_backward(output_features, r)
         return output_features
     @staticmethod
     def backward(ctx, grad_output):
-        output_features, aas, aas2, r = ctx.saved_tensors
-        print(r)
-        print(grad_output)
+        output_features, r = ctx.saved_tensors
         grad_input = grad_output.new().resize_(
-            aas2).zero_().index_copy_(0, r, grad_output.data)
-        grad_input = grad_input.view(aas).permute(
+            ctx.aas2).zero_().index_copy_(0, r, grad_output.data)
+        grad_input = grad_input.view(ctx.aas).permute(
             *([0, ctx.dimension + 1] + list(range(1, ctx.dimension + 1))))
         return grad_input, None, None, None
-class DenseToSparse(Module):
-    def __init__(self, dimension):
-        Module.__init__(self)
-        self.dimension = dimension
-    def forward(self, input):
-        output = SparseConvNetTensor()
-        output.metadata = Metadata(self.dimension)
-        output.spatial_size = torch.LongTensor(list(input.size()[2:]))
-        output.features = DenseToSparseFunction.apply(
-            input,
-            output.metadata,
-            output.spatial_size,
-            self.dimension)
-        return output
-    def __repr__(self):
-        return 'DenseToSparse(' + str(self.dimension) + ')'
-    def input_spatial_size(self, out_size):
-        return out_size
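
Note: the DenseToSparse docstring above describes the intended use: turn a dense batch x nPlanes x spatial tensor into a SparseConvNetTensor holding only the non-zero sites. A minimal usage sketch, assuming the module is exported from the sparseconvnet package as in the examples further down (shapes and values are illustrative, not part of this commit):

import torch
import sparseconvnet as scn

dense = torch.zeros(2, 1, 8, 8)    # batch x nPlanes x height x width
dense[0, 0, 3, 4] = 1.0            # two active sites in total
dense[1, 0, 5, 2] = 2.0
layer = scn.DenseToSparse(2)       # dimension of the input field
sparse = layer(dense)              # a SparseConvNetTensor
print(sparse.spatial_size)         # LongTensor([8, 8]), i.e. input.size()[2:]
print(sparse.features.size())      # (number of active sites) x nPlanes
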
@@ -12,28 +12,28 @@ from .sparseConvNetTensor import SparseConvNetTensor
 class InputBatch(SparseConvNetTensor):
     def __init__(self, dimension, spatial_size):
+        SparseConvNetTensor.__init__(self, None, None, spatial_size)
         self.dimension = dimension
         self.spatial_size = toLongTensor(dimension, spatial_size)
-        SparseConvNetTensor.__init__(self, None, None, spatial_size)
         self.features = torch.FloatTensor()
         self.metadata = Metadata(dimension)
         dim_fn(dimension, 'setInputSpatialSize')(
             self.metadata.ffi, self.spatial_size)
-    def addSample(self):
+    def add_sample(self):
         dim_fn(self.dimension, 'batchAddSample')(
             self.metadata.ffi)
-    def setLocation(self, location, vector, overwrite=False):
+    def set_location(self, location, vector, overwrite=False):
         assert location.min() >= 0 and (self.spatial_size - location).min() > 0
         dim_fn(self.dimension, 'setInputSpatialLocation')(
             self.metadata.ffi, self.features, location, vector, overwrite)
-    def setLocation_(self, location, vector, overwrite=False):
+    def set_location_(self, location, vector, overwrite=False):
         dim_fn(self.dimension, 'setInputSpatialLocation')(
             self.metadata.ffi, self.features, location, vector, overwrite)
-    def setLocations(self, locations, vectors, overwrite=False):
+    def set_locations(self, locations, vectors, overwrite=False):
         """
         To set n locations in d dimensions, locations can be
         - A size (n,d) LongTensor, giving d-dimensional coordinates -- points
@@ -57,6 +57,53 @@ class InputBatch(SparseConvNetTensor):
         dim_fn(self.dimension, 'setInputSpatialLocations')(
             self.metadata.ffi, self.features, locations, vectors, overwrite)
+    def set_locations_(self, locations, vector, overwrite=False):
+        dim_fn(self.dimension, 'setInputSpatialLocations')(
+            self.metadata.ffi, self.features, locations, vectors, overwrite)
+    def add_sample_from_tensor(self, tensor, offset, threshold=0):
+        self.nActive = dim_fn(
+            self.dimension,
+            'addSampleFromThresholdedTensor')(
+            self.metadata.ffi,
+            self.features,
+            tensor,
+            offset,
+            self.spatial_size,
+            threshold)
+    def precompute_metadata(self, size):
+        """
+        Optional.
+        Allows precomputation of 'rulebooks' in data loading threads.
+        Use size == 2 if downsizing with size-2 stride-2 operations
+        Use size == 3 if downsizing with size-3 stride-2 operations
+        """
+        if size == 2:
+            dim_fn(self.dimension, 'generateRuleBooks2s2')(self.metadata.ffi)
+        if size == 3:
+            dim_fn(self.dimension, 'generateRuleBooks3s2')(self.metadata.ffi)
+    "Deprecated method names."
+    def addSample(self):
+        dim_fn(self.dimension, 'batchAddSample')(
+            self.metadata.ffi)
+    def setLocation(self, location, vector, overwrite=False):
+        assert location.min() >= 0 and (self.spatial_size - location).min() > 0
+        dim_fn(self.dimension, 'setInputSpatialLocation')(
+            self.metadata.ffi, self.features, location, vector, overwrite)
+    def setLocation_(self, location, vector, overwrite=False):
+        dim_fn(self.dimension, 'setInputSpatialLocation')(
+            self.metadata.ffi, self.features, location, vector, overwrite)
+    def setLocations(self, locations, vectors, overwrite=False):
+        l = locations[:, :self.dimension]
+        assert l.min() >= 0 and (self.spatial_size.expand_as(l) - l).min() > 0
+        dim_fn(self.dimension, 'setInputSpatialLocations')(
+            self.metadata.ffi, self.features, locations, vectors, overwrite)
     def setLocations_(self, locations, vector, overwrite=False):
         dim_fn(self.dimension, 'setInputSpatialLocations')(
             self.metadata.ffi, self.features, locations, vectors, overwrite)
...
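
Note: precompute_metadata, added above, is meant to move rulebook construction into the data-loading threads. A short sketch of the renamed snake_case API under that scheme (the spatial size and coordinates are illustrative):

import torch
import sparseconvnet as scn

batch = scn.InputBatch(2, torch.LongTensor([10, 10]))
batch.add_sample()                                   # was addSample()
batch.set_location(torch.LongTensor([3, 4]),         # was setLocation()
                   torch.FloatTensor([1.0]))
batch.precompute_metadata(2)  # optional: build rulebooks for size-2, stride-2 downsampling
                              # here, in the loader thread, instead of during the forward pass
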
@@ -18,7 +18,7 @@ class Convolution(SparseModule):
         self.nIn = nIn
         self.nOut = nOut
         self.filter_size = toLongTensor(dimension, filter_size)
-        self.filter_volume = self.filter_size.prod()
+        self.filter_volume = self.filter_size.prod().item()
         self.filter_stride = toLongTensor(dimension, filter_stride)
         std = (2.0 / nIn / self.filter_volume)**0.5
         self.weight = torch.Tensor(
@@ -92,14 +92,14 @@ class Convolution(SparseModule):
         s = 'Convolution ' + str(self.nIn) + '->' + str(self.nOut) + ' C'
         if self.filter_size.max() == self.filter_size.min() and\
                 self.filter_stride.max() == self.filter_stride.min():
-            s = s + str(self.filter_size[0]) + '/' + str(self.filter_stride[0])
+            s = s + str(self.filter_size[0].item()) + '/' + str(self.filter_stride[0].item())
         else:
-            s = s + '(' + str(self.filter_size[0])
+            s = s + '(' + str(self.filter_size[0].item())
             for i in self.filter_size[1:]:
-                s = s + ',' + str(i)
-            s = s + ')/(' + str(self.filter_stride[0])
+                s = s + ',' + str(i.item())
+            s = s + ')/(' + str(self.filter_stride[0].item())
             for i in self.filter_stride[1:]:
-                s = s + ',' + str(i)
+                s = s + ',' + str(i.item())
             s = s + ')'
         return s
...
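
Note: the .item() calls added here (and in the following files) presumably guard against PyTorch versions in which reductions and integer indexing return zero-dimensional tensors rather than Python numbers; extracting the scalar keeps filter_volume usable in plain arithmetic and keeps __repr__ printing digits. A tiny illustration, assuming such a PyTorch version:

import torch

filter_size = torch.LongTensor([3, 3])
print(filter_size.prod())          # tensor(9)  -- a zero-dimensional tensor
print(filter_size.prod().item())   # 9          -- a plain Python int, safe in 2.0 / nIn / volume
print(str(filter_size[0]))         # 'tensor(3)'
print(str(filter_size[0].item()))  # '3'
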
@@ -21,7 +21,7 @@ class Deconvolution(SparseModule):
         self.nOut = nOut
         self.filter_size = toLongTensor(dimension, filter_size)
         self.filter_stride = toLongTensor(dimension, filter_stride)
-        self.filter_volume = self.filter_size.prod()
+        self.filter_volume = self.filter_size.prod().item()
         std = (2.0 / nIn / self.filter_volume)**0.5
         self.weight = torch.Tensor(
             nIn * self.filter_volume, nOut
@@ -91,14 +91,14 @@ class Deconvolution(SparseModule):
         s = 'Deconvolution ' + str(self.nIn) + '->' + str(self.nOut) + ' C'
         if self.filter_size.max() == self.filter_size.min() and\
                 self.filter_stride.max() == self.filter_stride.min():
-            s = s + str(self.filter_size[0]) + '/' + str(self.filter_stride[0])
+            s = s + str(self.filter_size[0].item()) + '/' + str(self.filter_stride[0].item())
         else:
-            s = s + '(' + str(self.filter_size[0])
+            s = s + '(' + str(self.filter_size[0].item())
             for i in self.filter_size[1:]:
-                s = s + ',' + str(i)
-            s = s + ')/(' + str(self.filter_stride[0])
+                s = s + ',' + str(i.item())
+            s = s + ')/(' + str(self.filter_stride[0].item())
             for i in self.filter_stride[1:]:
-                s = s + ',' + str(i)
+                s = s + ',' + str(i.item())
             s = s + ')'
         return s
...
@@ -18,7 +18,7 @@ class SubmanifoldConvolution(SparseModule):
         self.nIn = nIn
         self.nOut = nOut
         self.filter_size = toLongTensor(dimension, filter_size)
-        self.filter_volume = self.filter_size.prod()
+        self.filter_volume = self.filter_size.prod().item()
         std = (2.0 / nIn / self.filter_volume)**0.5
         self.weight = torch.Tensor(
             nIn * self.filter_volume, nOut
@@ -87,10 +87,10 @@ class SubmanifoldConvolution(SparseModule):
         s = 'SubmanifoldConvolution ' + \
             str(self.nIn) + '->' + str(self.nOut) + ' C'
         if self.filter_size.max() == self.filter_size.min():
-            s = s + str(self.filter_size[0])
+            s = s + str(self.filter_size[0].item())
         else:
-            s = s + '(' + str(self.filter_size[0])
+            s = s + '(' + str(self.filter_size[0].item())
             for i in self.filter_size[1:]:
-                s = s + ',' + str(i)
+                s = s + ',' + str(i.item())
             s = s + ')'
         return s
@@ -51,9 +51,9 @@ class SparseConvNetTensor(object):
     def __repr__(self):
         return 'SparseConvNetTensor<<' + \
-            repr(self.features) + \
-            repr(self.get_spatial_locations() if self.metadata else None) + \
-            repr(self.spatial_size) + \
+            'features=' + repr(self.features) + \
+            'coordinates=' + repr(self.get_spatial_locations() if self.metadata else None) + \
+            'spatial size=' + repr(self.spatial_size) + \
             '>>'
     def to_variable(self, requires_grad=False, volatile=False):
...
@@ -52,13 +52,13 @@ class SparseToDenseFunction(Function):
         input_features, spatial_size = ctx.saved_tensors
         dim_typed_fn(
             ctx.dimension,
-            input_features,
+            input_features.contiguous(),
             'SparseToDense_updateGradInput')(
             spatial_size,
             ctx.input_metadata.ffi,
             input_features,
             grad_input,
-            grad_output,
+            grad_output.contiguous(),
             torch.cuda.IntTensor() if input_features.is_cuda else nullptr)
         return grad_input, None, None, None, None
...
@@ -12,6 +12,56 @@ from torch.nn import Module, Parameter
 from .utils import *
 from .sparseConvNetTensor import SparseConvNetTensor
+class SubmanifoldConvolution(Module):
+    def __init__(self, dimension, nIn, nOut, filter_size, bias):
+        Module.__init__(self)
+        self.dimension = dimension
+        self.nIn = nIn
+        self.nOut = nOut
+        self.filter_size = toLongTensor(dimension, filter_size)
+        self.filter_volume = self.filter_size.prod().item()
+        std = (2.0 / nIn / self.filter_volume)**0.5
+        self.weight = Parameter(torch.Tensor(
+            nIn * self.filter_volume, nOut
+        ).normal_(0, std))
+        if bias:
+            self.bias = Parameter(torch.Tensor(nOut).zero_())
+        else:
+            self.bias = None
+    def forward(self, input):
+        assert input.features.ndimension() == 0 or input.features.size(1) == self.nIn
+        output = SparseConvNetTensor()
+        output.metadata = input.metadata
+        output.spatial_size = input.spatial_size
+        output.features = SubmanifoldConvolutionFunction.apply(
+            input.features,
+            self.weight,
+            self.bias,
+            input.metadata,
+            input.spatial_size,
+            self.dimension,
+            self.filter_size)
+        return output
+    def __repr__(self):
+        s = 'SubmanifoldConvolution ' + \
+            str(self.nIn) + '->' + str(self.nOut) + ' C'
+        if self.filter_size.max() == self.filter_size.min():
+            s = s + str(self.filter_size[0].item())
+        else:
+            s = s + '(' + str(self.filter_size[0].item())
+            for i in self.filter_size[1:]:
+                s = s + ',' + str(i.item())
+            s = s + ')'
+        return s
+    def input_spatial_size(self, out_size):
+        return out_size
+class ValidConvolution(SubmanifoldConvolution):
+    pass
 class SubmanifoldConvolutionFunction(Function):
     @staticmethod
@@ -26,11 +76,6 @@ class SubmanifoldConvolutionFunction(Function):
             filter_size):
         ctx.input_metadata = input_metadata
         ctx.dimension = dimension
-        # ctx.input_features=input_features
-        # ctx.spatial_size=spatial_size
-        # ctx.weight=weight
-        # ctx.bias=bias
-        # ctx.filter_size=filter_size
         output_features = input_features.new()
         ctx.save_for_backward(
             input_features,
@@ -76,55 +121,3 @@ class SubmanifoldConvolutionFunction(Function):
             0, # remove this parameter
             torch.cuda.IntTensor() if input_features.is_cuda else nullptr)
         return grad_input, grad_weight, grad_bias, None, None, None, None
-class SubmanifoldConvolution(Module):
-    def __init__(self, dimension, nIn, nOut, filter_size, bias):
-        Module.__init__(self)
-        self.dimension = dimension
-        self.nIn = nIn
-        self.nOut = nOut
-        self.filter_size = toLongTensor(dimension, filter_size)
-        self.filter_volume = self.filter_size.prod().item()
-        std = (2.0 / nIn / self.filter_volume)**0.5
-        self.weight = Parameter(torch.Tensor(
-            nIn * self.filter_volume, nOut
-        ).normal_(0, std))
-        if bias:
-            self.bias = Parameter(torch.Tensor(nOut).zero_())
-        else:
-            self.bias = None
-    def forward(self, input):
-        assert input.features.ndimension() == 0 or input.features.size(1) == self.nIn
-        output = SparseConvNetTensor()
-        output.metadata = input.metadata
-        output.spatial_size = input.spatial_size
-        output.features = SubmanifoldConvolutionFunction.apply(
-            input.features,
-            self.weight,
-            self.bias,
-            input.metadata,
-            input.spatial_size,
-            self.dimension,
-            self.filter_size)
-        return output
-    def __repr__(self):
-        s = 'SubmanifoldConvolution ' + \
-            str(self.nIn) + '->' + str(self.nOut) + ' C'
-        if self.filter_size.max() == self.filter_size.min():
-            s = s + str(self.filter_size[0].item())
-        else:
-            s = s + '(' + str(self.filter_size[0].item())
-            for i in self.filter_size[1:]:
-                s = s + ',' + str(i.item())
-            s = s + ')'
-        return s
-    def input_spatial_size(self, out_size):
-        return out_size
-class ValidConvolution(SubmanifoldConvolution):
-    pass
@@ -85,17 +85,17 @@ msg = [
     " X X X X X X X X X X X X X X X X X X ",
     " X X XXX XXX XXX XX X X XX X X XXX XXX "]
-#Add a sample using setLocation
-input.addSample()
+#Add a sample using set_location
+input.add_sample()
 for y, line in enumerate(msg):
     for x, c in enumerate(line):
         if c == 'X':
             location = torch.LongTensor([x, y])
             featureVector = torch.FloatTensor([1])
-            input.setLocation(location, featureVector, 0)
-#Add a sample using setLocations
-input.addSample()
+            input.set_location(location, featureVector, 0)
+#Add a sample using set_locations
+input.add_sample()
 locations = []
 features = []
 for y, line in enumerate(msg):
@@ -105,7 +105,7 @@ for y, line in enumerate(msg):
         features.append([1])
 locations = torch.LongTensor(locations)
 features = torch.FloatTensor(features)
-input.setLocations(locations, features, 0)
+input.set_locations(locations, features, 0)
 # Optional: allow metadata preprocessing to be done in batch preparation threads
 # to improve GPU utilization.
...
@@ -7,7 +7,7 @@
 import torch
 import torch.legacy.nn as nn
 import sparseconvnet.legacy as scn
-from data import getIterators
+from data import get_iterators
 # Use the GPU if there is one, otherwise CPU
 dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'
@@ -38,7 +38,7 @@ print(model)
 spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
 print('input spatial size', spatial_size)
-dataset = getIterators(spatial_size, 63, 2)
+dataset = get_iterators(spatial_size, 63, 2)
 scn.ClassificationTrainValidate(
     model, dataset,
     {'nEpochs': 100, 'initial_LR': 0.1, 'LR_decay': 0.05, 'weightDecay': 1e-4})
@@ -7,7 +7,7 @@
 import torch
 import torch.nn as nn
 import sparseconvnet as scn
-from data import getIterators
+from data import get_iterators
 # two-dimensional SparseConvNet
@@ -38,7 +38,7 @@ class Model(nn.Module):
 model = Model()
 spatial_size = model.sparseModel.input_spatial_size(torch.LongTensor([1, 1]))
 print('Input spatial size:', spatial_size)
-dataset = getIterators(spatial_size, 63, 3)
+dataset = get_iterators(spatial_size, 63, 3)
 scn.ClassificationTrainValidate(
     model, dataset,
     {'n_epochs': 100,
...
@@ -7,7 +7,7 @@
 import torch
 import torch.legacy.nn as nn
 import sparseconvnet.legacy as scn
-from data import getIterators
+from data import get_iterators
 # Use the GPU if there is one, otherwise CPU
 dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'
@@ -35,7 +35,7 @@ print([x.size() for x in model.parameters()[0]])
 spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
 print('input spatial size', spatial_size)
-dataset = getIterators(spatial_size, 63, 3)
+dataset = get_iterators(spatial_size, 63, 3)
 scn.ClassificationTrainValidate(
     model, dataset,
     {'nEpochs': 100, 'initial_LR': 0.1, 'LR_decay': 0.05, 'weightDecay': 1e-4})
@@ -7,7 +7,7 @@
 import torch
 import torch.nn as nn
 import sparseconvnet as scn
-from data import getIterators
+from data import get_iterators
 # two-dimensional SparseConvNet
 class Model(nn.Module):
@@ -33,7 +33,7 @@ class Model(nn.Module):
 model = Model()
 spatial_size = model.sparseModel.input_spatial_size(torch.LongTensor([1, 1]))
 print('Input spatial size:', spatial_size)
-dataset = getIterators(spatial_size, 63, 3)
+dataset = get_iterators(spatial_size, 63, 3)
 scn.ClassificationTrainValidate(
     model, dataset,
     {'n_epochs': 100,
...
@@ -7,7 +7,7 @@
 import torch
 import torch.legacy.nn as nn
 import sparseconvnet.legacy as scn
-from data import getIterators
+from data import get_iterators
 # Use the GPU if there is one, otherwise CPU
 dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'
@@ -32,7 +32,7 @@ print(model)
 spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
 print('input spatial size', spatial_size)
-dataset = getIterators(spatial_size, 63, 3)
+dataset = get_iterators(spatial_size, 63, 3)
 scn.ClassificationTrainValidate(model,
                                 dataset,
                                 {'nEpochs': 100,
...