Commit d77687a6 authored by Benjamin Graham's avatar Benjamin Graham Committed by Benjamin Thomas Graham
Browse files

Rename ValidConvolutions to SubmanifoldConvolutions, update for PyTorch 0.4 Tensor/Variable merge

parent 297e04c0
......@@ -7,7 +7,7 @@
--[[
Assume all the inputs have identical SparseGrids and input[i].features:size(1)
Assume input[1].features:size(2) >= input[i].features:size(2) for all i=1,#input
output.validRules is taken from input[1].validRules (could do set union?)
output.submanifoldRules is taken from input[1].submanifoldRules (could do set union?)
(for resnets, make sure the residual link is input[2])
]]
......
......@@ -8,7 +8,7 @@
Store Metadata relating to which spatial locations are active at each scale.
Convolutions and 'convolution reversing' deconvolutions
all coexist within the same MetaData object as long as each spatial size
only occurs once. Valid convolutions do not change the spatial structure.
only occurs once. Submanifold convolutions do not change the spatial structure.
Serialization is emulated by storing the pointer as an integer.
This is sufficient for multithreaded batch preparation: each 'serialized'
......
......@@ -45,7 +45,7 @@ return function(sparseconvnet)
function sparseconvnet.SparseVggNet(dimension,nInputPlanes,layers,opts)
--[[
VGG style nets
Use valid convolutions
Use submanifold convolutions
Also implements 'Plus'-augmented nets
]]
local nPlanes=nInputPlanes
......@@ -57,20 +57,20 @@ return function(sparseconvnet)
elseif x[1] == 'MP' then
m:add(sparseconvnet.MaxPooling(dimension,x[2],x[3]))
elseif x[1]=='C' and #x==2 then
m:add(sparseconvnet.ValidConvolution(dimension,nPlanes,x[2],3,false))
m:add(sparseconvnet.SubmanifoldConvolution(dimension,nPlanes,x[2],3,false))
nPlanes=x[2]
m:add(sparseconvnet.BatchNormReLU(nPlanes))
elseif x[1]=='C' and #x==3 then
m:add(sparseconvnet.ConcatTable()
:add(
sparseconvnet.Sequential()
:add(sparseconvnet.ValidConvolution(dimension,nPlanes,x[2],3,false))
:add(sparseconvnet.SubmanifoldConvolution(dimension,nPlanes,x[2],3,false))
)
:add(
sparseconvnet.Sequential()
:add(sparseconvnet.Convolution(dimension,nPlanes,x[3],3,2,false))
:add(sparseconvnet.BatchNormReLU(x[3]))
:add(sparseconvnet.ValidConvolution(dimension,x[3],x[3],3,false))
:add(sparseconvnet.SubmanifoldConvolution(dimension,x[3],x[3],3,false))
:add(sparseconvnet.BatchNormReLU(x[3]))
:add(sparseconvnet.Deconvolution(dimension,x[3],x[3],3,2,false))
))
......@@ -81,28 +81,28 @@ return function(sparseconvnet)
m:add(sparseconvnet.ConcatTable()
:add(
sparseconvnet.Sequential()
:add(sparseconvnet.ValidConvolution(dimension,nPlanes,x[2],3,false))
:add(sparseconvnet.SubmanifoldConvolution(dimension,nPlanes,x[2],3,false))
)
:add(
sparseconvnet.Sequential()
:add(sparseconvnet.Convolution(dimension,nPlanes,x[3],3,2,false))
:add(sparseconvnet.BatchNormReLU(x[3]))
:add(sparseconvnet.ValidConvolution(dimension,x[3],x[3],3,false))
:add(sparseconvnet.SubmanifoldConvolution(dimension,x[3],x[3],3,false))
:add(sparseconvnet.BatchNormReLU(x[3]))
:add(sparseconvnet.Deconvolution(dimension,x[3],x[3],3,2,false))
)
:add(sparseconvnet.Sequential()
:add(sparseconvnet.Convolution(dimension,nPlanes,x[4],3,2,false))
:add(sparseconvnet.BatchNormReLU(x[4]))
:add(sparseconvnet.ValidConvolution(dimension,x[4],x[4],3,false))
:add(sparseconvnet.SubmanifoldConvolution(dimension,x[4],x[4],3,false))
:add(sparseconvnet.BatchNormReLU(x[4]))
:add(sparseconvnet.Convolution(dimension,x[4],x[4],3,2,false))
:add(sparseconvnet.BatchNormReLU(x[4]))
:add(sparseconvnet.ValidConvolution(dimension,x[4],x[4],3,false))
:add(sparseconvnet.SubmanifoldConvolution(dimension,x[4],x[4],3,false))
:add(sparseconvnet.BatchNormReLU(x[4]))
:add(sparseconvnet.Deconvolution(dimension,x[4],x[4],3,2,false))
:add(sparseconvnet.BatchNormReLU(x[4]))
:add(sparseconvnet.ValidConvolution(dimension,x[4],x[4],3,false))
:add(sparseconvnet.SubmanifoldConvolution(dimension,x[4],x[4],3,false))
:add(sparseconvnet.BatchNormReLU(x[4]))
:add(sparseconvnet.Deconvolution(dimension,x[4],x[4],3,2,false))
))
......@@ -138,10 +138,10 @@ return function(sparseconvnet)
:add( --convolutional connection
sparseconvnet.Sequential()
:add(stride==1 and
sparseconvnet.ValidConvolution(dimension,nPlanes,n,3,false) or
sparseconvnet.SubmanifoldConvolution(dimension,nPlanes,n,3,false) or
sparseconvnet.Convolution(dimension,nPlanes,n,3,stride,false))
:add(sparseconvnet.BatchNormReLU(n))
:add(sparseconvnet.ValidConvolution(
:add(sparseconvnet.SubmanifoldConvolution(
dimension,n,n,3,false)))
:add(residual(nPlanes,n,stride))
)
......@@ -150,10 +150,10 @@ return function(sparseconvnet)
:add( --convolutional connection
sparseconvnet.Sequential()
:add(sparseconvnet.BatchNormReLU(n))
:add(sparseconvnet.ValidConvolution(
:add(sparseconvnet.SubmanifoldConvolution(
dimension,nPlanes,n,3,false))
:add(sparseconvnet.BatchNormReLU(n))
:add(sparseconvnet.ValidConvolution(
:add(sparseconvnet.SubmanifoldConvolution(
dimension,n,n,3,false))
)
:add(sparseconvnet.Identity())
......@@ -169,7 +169,7 @@ return function(sparseconvnet)
end
function sparseconvnet.SparseDenseNet(dimension,nInputPlanes,layers)
--[[
SparseConvNet meets DenseNets using valid convolutions
SparseConvNet meets DenseNets using submanifold convolutions
Could do with a less confusing name
]]
local nPlanes=nInputPlanes
......
......@@ -8,7 +8,7 @@ return function(sparseconvnet)
local C = sparseconvnet.C
local Convolution, parent = torch.class(
'sparseconvnet.ValidConvolution', 'nn.Module', sparseconvnet)
'sparseconvnet.SubmanifoldConvolution', 'nn.Module', sparseconvnet)
function Convolution:__init(dimension, nInputPlanes, nOutputPlanes,
filterSize, bias)
......@@ -52,7 +52,7 @@ return function(sparseconvnet)
self.output.spatialSize = input.spatialSize
self.shared.forwardPassMultiplyAddCount=
self.shared.forwardPassMultiplyAddCount+
C.dimTypedFn(self.dimension, self._type, 'ValidConvolution_updateOutput')(
C.dimTypedFn(self.dimension, self._type, 'SubmanifoldConvolution_updateOutput')(
input.spatialSize:cdata(),
self.filterSize:cdata(),
input.metadata.ffi,
......@@ -68,7 +68,7 @@ return function(sparseconvnet)
end
function Convolution:backward(input, gradOutput)
C.dimTypedFn(self.dimension, self._type, 'ValidConvolution_backward')(
C.dimTypedFn(self.dimension, self._type, 'SubmanifoldConvolution_backward')(
input.spatialSize:cdata(),
self.filterSize:cdata(),
input.metadata.ffi,
......@@ -100,7 +100,7 @@ return function(sparseconvnet)
end
function Convolution:__tostring()
local s = 'ValidConvolution ' .. self.nInputPlanes .. '->' .. self.nOutputPlanes..' C'
local s = 'SubmanifoldConvolution ' .. self.nInputPlanes .. '->' .. self.nOutputPlanes..' C'
if self.filterSize:max()==self.filterSize:min() and
self.filterStride:max()==self.filterStride:min() then
s=s..self.filterSize[1] ..(self.filterStride[1]==1 and
......
......@@ -35,7 +35,7 @@ for _,module in ipairs({
'sparseconvnet.ReLU',
'sparseconvnet.Sequential',
'sparseconvnet.SparseToDense',
'sparseconvnet.ValidConvolution',
'sparseconvnet.SubmanifoldConvolution',
}) do
require(module)(sparseconvnet)
end
......
......@@ -14,7 +14,7 @@ local sparseModel = sparseconvnet.Sequential()
local denseModel = nn.Sequential()
model:add(sparseModel):add(denseModel)
sparseModel
:add(sparseconvnet.ValidConvolution(2,3,16,3,false))
:add(sparseconvnet.SubmanifoldConvolution(2,3,16,3,false))
:add(sparseconvnet.SparseDenseNet(2,16,{
{'MP',compression=0},
{nExtraLayers=2, growthRate=16},
......
......@@ -17,7 +17,7 @@ model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
sparseModel.add(scn.ValidConvolution(2, 3, 8, 3, False))\
sparseModel.add(scn.SubmanifoldConvolution(2, 3, 8, 3, False))\
.add(scn.SparseDenseNet(2, 8, [
{'pool': 'MP', 'compression': 0},
{'nExtraLayers': 2, 'growthRate': 8},
......
......@@ -14,7 +14,7 @@ local sparseModel = sparseconvnet.Sequential()
local denseModel = nn.Sequential()
model:add(sparseModel):add(denseModel)
sparseModel
:add(sparseconvnet.ValidConvolution(2,3,8,3,false))
:add(sparseconvnet.SubmanifoldConvolution(2,3,8,3,false))
:add(sparseconvnet.MaxPooling(2,3,2))
:add(sparseconvnet.SparseResNet(
2,8,{
......
......@@ -10,36 +10,40 @@ import sparseconvnet as scn
from data import getIterators
# two-dimensional SparseConvNet
class Model(nn.Module):
def __init__(self):
nn.Module.__init__(self)
self.sparseModel=scn.Sequential(
).add(scn.ValidConvolution(2, 3, 8, 3, False)
).add(scn.MaxPooling(2, 3, 2)
).add(scn.SparseResNet(2, 8, [
['b', 8, 2, 1],
['b', 16, 2, 2],
['b', 24, 2, 2],
['b', 32, 2, 2]])
self.sparseModel = scn.Sequential(
).add(scn.SubmanifoldConvolution(2, 3, 8, 3, False)
).add(scn.MaxPooling(2, 3, 2)
).add(scn.SparseResNet(2, 8, [
['b', 8, 2, 1],
['b', 16, 2, 2],
['b', 24, 2, 2],
['b', 32, 2, 2]])
).add(scn.Convolution(2, 32, 64, 5, 1, False)
).add(scn.BatchNormReLU(64)
).add(scn.SparseToDense(2,64))
).add(scn.BatchNormReLU(64)
).add(scn.SparseToDense(2, 64))
self.linear = nn.Linear(64, 183)
def forward(self, x):
x = self.sparseModel(x)
x = x.view(-1,64)
x = x.view(-1, 64)
x = self.linear(x)
return x
model=Model()
model = Model()
spatial_size = model.sparseModel.input_spatial_size(torch.LongTensor([1, 1]))
print('Input spatial size:', spatial_size)
dataset = getIterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate(
model, dataset,
{'n_epochs': 100,
'initial_lr': 0.1,
'lr_decay': 0.05,
'weight_decay': 1e-4,
'use_gpu': torch.cuda.is_available(),
'check_point': True,})
'initial_lr': 0.1,
'lr_decay': 0.05,
'weight_decay': 1e-4,
'use_gpu': torch.cuda.is_available(),
'check_point': True, })
......@@ -17,7 +17,7 @@ model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
sparseModel.add(scn.ValidConvolution(2, 3, 8, 3, False))
sparseModel.add(scn.SubmanifoldConvolution(2, 3, 8, 3, False))
sparseModel.add(scn.MaxPooling(2, 3, 2))
sparseModel.add(scn.SparseResNet(2, 8, [
['b', 8, 2, 1],
......
......@@ -16,27 +16,29 @@ class Model(nn.Module):
self.sparseModel = scn.SparseVggNet(2, 3, [
['C', 8, ], ['C', 8], 'MP',
['C', 16], ['C', 16], 'MP',
['C', 16, 8], ['C', 16, 8], 'MP',
['C', 24, 8], ['C', 24, 8], 'MP']
).add(scn.Convolution(2, 32, 64, 5, 1, False)
).add(scn.BatchNormReLU(64)
).add(scn.SparseToDense(2,64))
['C', 16 + 8], ['C', 16 + 8], 'MP',
['C', 24 + 8], ['C', 24 + 8], 'MP']
).add(scn.Convolution(2, 32, 64, 5, 1, False)
).add(scn.BatchNormReLU(64)
).add(scn.SparseToDense(2, 64))
self.linear = nn.Linear(64, 183)
def forward(self, x):
x = self.sparseModel(x)
x = x.view(-1,64)
x = x.view(-1, 64)
x = self.linear(x)
return x
model=Model()
model = Model()
spatial_size = model.sparseModel.input_spatial_size(torch.LongTensor([1, 1]))
print('Input spatial size:', spatial_size)
dataset = getIterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate(
model, dataset,
{'n_epochs': 100,
'initial_lr': 0.1,
'lr_decay': 0.05,
'weight_decay': 1e-4,
'use_gpu': torch.cuda.is_available(),
'check_point': True,})
'initial_lr': 0.1,
'lr_decay': 0.05,
'weight_decay': 1e-4,
'use_gpu': torch.cuda.is_available(),
'check_point': True, })
......@@ -33,6 +33,10 @@ print(model)
spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
print('input spatial size', spatial_size)
dataset = getIterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate(
model, dataset,
{'nEpochs': 100, 'initial_LR': 0.1, 'LR_decay': 0.05, 'weightDecay': 1e-4, 'checkPoint': False})
scn.ClassificationTrainValidate(model,
dataset,
{'nEpochs': 100,
'initial_LR': 0.1,
'LR_decay': 0.05,
'weightDecay': 1e-4,
'checkPoint': False})
......@@ -10,7 +10,7 @@ import sparseconvnet.legacy as scn
import pickle
import math
import random
import numpy
import numpy as np
import os
if not os.path.exists('pickle/'):
......@@ -25,6 +25,8 @@ def train(spatial_size, Scale, precomputeStride):
for i in range(9):
for j in range(6588):
d.append(d[j])
for i, x in enumerate(d):
x['idx'] = i
d = torchnet.dataset.ListDataset(d)
randperm = torch.randperm(len(d))
......@@ -36,10 +38,11 @@ def train(spatial_size, Scale, precomputeStride):
center = spatial_size.float().view(1, 2) / 2
p = torch.LongTensor(2)
v = torch.FloatTensor([1, 0, 0])
np_random = np.random.RandomState(tbl['idx'])
for char in tbl['input']:
inp.addSample()
m = torch.eye(2)
r = random.randint(1, 3)
r = np_random.randint(1, 3)
alpha = random.uniform(-0.2, 0.2)
if alpha == 1:
m[0][1] = alpha
......@@ -74,7 +77,7 @@ def train(spatial_size, Scale, precomputeStride):
# v[1]=(x2-x1)/l
# v[2]=(y2-y1)/l
# l=max(x2-x1,y2-y1,x1-x2,y1-y2,0.9)
# for j in numpy.arange(0,1,1/l):
# for j in np.arange(0,1,1/l):
# p[0]=math.floor(x1*j+x2*(1-j))
# p[1]=math.floor(y1*j+y2*(1-j))
# inp.setLocation(p,v,False)
......
......@@ -14,7 +14,7 @@ local denseModel = nn.Sequential()
local model = nn.Sequential():add(sparseModel):add(denseModel)
sparseModel
:add(sparseconvnet.ValidConvolution(2,3,16,3,false))
:add(sparseconvnet.SubmanifoldConvolution(2,3,16,3,false))
:add(sparseconvnet.SparseDenseNet(2,16,{
{'MP',compression=0},
{nExtraLayers=2, growthRate=16},
......
......@@ -17,7 +17,7 @@ model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
sparseModel.add(scn.ValidConvolution(2, 3, 16, 3, False))\
sparseModel.add(scn.SubmanifoldConvolution(2, 3, 16, 3, False))\
.add(scn.SparseDenseNet(2, 16, [
{'pool': 'MP', 'compression': 0},
{'nExtraLayers': 2, 'growthRate': 16},
......
......@@ -14,7 +14,7 @@ local denseModel = nn.Sequential()
local model = nn.Sequential():add(sparseModel):add(denseModel)
sparseModel
:add(sparseconvnet.ValidConvolution(2,3,16,3,false))
:add(sparseconvnet.SubmanifoldConvolution(2,3,16,3,false))
:add(sparseconvnet.SparseDenseNet(2,16,{
{'MP',compression=0},
{nExtraLayers=4, growthRate=16},
......
......@@ -24,7 +24,7 @@ local denseModel = nn.Sequential()
local model = nn.Sequential():add(sparseModel):add(denseModel)
sparseModel
:add(sparseconvnet.ValidConvolution(2,3,16,3,false))
:add(sparseconvnet.SubmanifoldConvolution(2,3,16,3,false))
:add(sparseconvnet.MaxPooling(2,2,2))
:add(sparseconvnet.SparseResNet(
2,16,{
......
......@@ -17,7 +17,7 @@ model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
sparseModel.add(scn.ValidConvolution(2, 3, 16, 3, False))
sparseModel.add(scn.SubmanifoldConvolution(2, 3, 16, 3, False))
sparseModel.add(scn.MaxPooling(2, 3, 2))
sparseModel.add(scn.SparseResNet(2, 16, [
['b', 16, 2, 1],
......
......@@ -14,7 +14,7 @@ local denseModel = nn.Sequential()
local model = nn.Sequential():add(sparseModel):add(denseModel)
sparseModel
:add(sparseconvnet.ValidConvolution(2,3,16,3,false))
:add(sparseconvnet.SubmanifoldConvolution(2,3,16,3,false))
:add(sparseconvnet.MaxPooling(2,2,2))
:add(sparseconvnet.SparseResNet(
2,16,{
......
......@@ -24,7 +24,7 @@ local denseModel = nn.Sequential()
local model = nn.Sequential():add(sparseModel):add(denseModel)
sparseModel
:add(sparseconvnet.ValidConvolution(2,3,16,3,false))
:add(sparseconvnet.SubmanifoldConvolution(2,3,16,3,false))
:add(sparseconvnet.MaxPooling(2,2,2))
:add(sparseconvnet.SparseResNet(
2,16,{
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment