Commit f6e15d2f authored by Benjamin Thomas Graham's avatar Benjamin Thomas Graham
Browse files

Fixes

parent b862d6a2
......@@ -4,5 +4,7 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#export TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5"
rm -rf build/ dist/ sparseconvnet.egg-info
python setup.py install && python examples/hello-world.py
......@@ -5,6 +5,6 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#export TORCH_CUDA_ARCH_LIST="6.0;6.1;6.2;7.0;7.5"
rm -rf build/ dist/ sparseconvnet.egg-info sparseconvnet_SCN*.so
python setup.py develop
python examples/hello-world.py
python setup.py develop && python examples/hello-world.py
......@@ -14,8 +14,7 @@ if torch.cuda.is_available():
this_dir = os.path.dirname(os.path.realpath(__file__))
torch_dir = os.path.dirname(torch.__file__)
conda_include_dir = '/'.join(torch_dir.split('/')[:-4]) + '/include'
extra = {'cxx': ['-std=c++11', '-fopenmp'], 'nvcc': ['-std=c++11', '-Xcompiler', '-fopenmp']}
extra = {'cxx': ['-std=c++14', '-fopenmp'], 'nvcc': ['-std=c++14', '-Xcompiler', '-fopenmp']}
setup(
name='sparseconvnet',
......@@ -29,12 +28,12 @@ setup(
CUDAExtension('sparseconvnet.SCN',
[
'sparseconvnet/SCN/cuda.cu', 'sparseconvnet/SCN/sparseconvnet_cuda.cpp', 'sparseconvnet/SCN/pybind.cpp'],
include_dirs=[conda_include_dir, this_dir+'/sparseconvnet/SCN/'],
include_dirs=[this_dir+'/sparseconvnet/SCN/'],
extra_compile_args=extra)
if torch.cuda.is_available() else
CppExtension('sparseconvnet.SCN',
['sparseconvnet/SCN/pybind.cpp', 'sparseconvnet/SCN/sparseconvnet_cpu.cpp'],
include_dirs=[conda_include_dir, this_dir+'/sparseconvnet/SCN/'],
include_dirs=[this_dir+'/sparseconvnet/SCN/'],
extra_compile_args=extra['cxx'])],
cmdclass={'build_ext': BuildExtension},
zip_safe=False,
......
......@@ -216,7 +216,6 @@ void blRules(SparseGrids<dimension> &SGs, RuleBook &rules, long *coords,
rules[0].push_back(length);
rules[0].push_back(nActive);
auto &rule = rules[1];
if (mode == 1) {
rule.resize(2 * nActive);
#pragma omp parallel for private(I)
for (I = 0; I < batchSize; I++) {
......@@ -228,7 +227,6 @@ void blRules(SparseGrids<dimension> &SGs, RuleBook &rules, long *coords,
rr += 2;
}
}
}
return;
}
......
......@@ -15,7 +15,7 @@ class BatchNormalization(Module):
Parameters:
nPlanes : number of input planes
eps : small number used to stabilise standard deviation calculation
momentum : for calculating running average for testing (default 0.9)
momentum : for calculating running average for testing (default 0.99)
affine : only 'true' is supported at present (default 'true')
noise : add multiplicative and additive noise during training if >0.
leakiness : Apply activation def inplace: 0<=leakiness<=1.
......@@ -25,7 +25,7 @@ class BatchNormalization(Module):
self,
nPlanes,
eps=1e-4,
momentum=0.9,
momentum=0.99,
affine=True,
leakiness=1):
Module.__init__(self)
......@@ -72,7 +72,7 @@ class BatchNormalization(Module):
class BatchNormReLU(BatchNormalization):
def __init__(self, nPlanes, eps=1e-4, momentum=0.9):
def __init__(self, nPlanes, eps=1e-4, momentum=0.99):
BatchNormalization.__init__(self, nPlanes, eps, momentum, True, 0)
def __repr__(self):
......@@ -82,7 +82,7 @@ class BatchNormReLU(BatchNormalization):
class BatchNormLeakyReLU(BatchNormalization):
def __init__(self, nPlanes, eps=1e-4, momentum=0.9, leakiness=0.333):
def __init__(self, nPlanes, eps=1e-4, momentum=0.99, leakiness=0.333):
BatchNormalization.__init__(self, nPlanes, eps, momentum, True, leakiness)
def __repr__(self):
......@@ -166,7 +166,7 @@ class MeanOnlyBNLeakyReLU(Module):
"""
Parameters:
nPlanes : number of input planes
momentum : for calculating running average for testing (default 0.9)
momentum : for calculating running average for testing (default 0.99)
leakiness : Apply activation def inplace: 0<=leakiness<=1.
0 for ReLU, values in (0,1) for LeakyReLU, 1 for no activation def.
"""
......@@ -175,7 +175,7 @@ class MeanOnlyBNLeakyReLU(Module):
nPlanes,
affine=True,
leakiness=1,
momentum=0.9):
momentum=0.99):
Module.__init__(self)
self.nPlanes = nPlanes
self.momentum = momentum
......
......@@ -318,7 +318,7 @@ def FullConvolutionalNetIntegratedLinear(dimension, reps, nPlanes, nClasses=-1,
return x+nPlanes
def foo(m,np):
for _ in range(reps):
if residual_blocks: #ResNet style blocks
if residual: #ResNet style blocks
m.add(scn.ConcatTable()
.add(scn.Identity())
.add(scn.Sequential()
......@@ -333,7 +333,7 @@ def FullConvolutionalNetIntegratedLinear(dimension, reps, nPlanes, nClasses=-1,
def bar(m,nPlanes,bias):
m.add(scn.BatchNormLeakyReLU(nPlanes,leakiness=leakiness))
m.add(scn.NetworkInNetwork(nPlanes,nClasses,bias)) #accumulte softmax input, only one set of biases
def baz(depth,nPlanes):
def baz(nPlanes):
m=scn.Sequential()
foo(m,nPlanes[0])
if len(nPlanes)==1:
......@@ -348,4 +348,4 @@ def FullConvolutionalNetIntegratedLinear(dimension, reps, nPlanes, nClasses=-1,
scn.UnPooling(dimension, downsample[0], downsample[1]))
m.add(ConcatTable(a,b))
m.add(scn.AddTable())
return baz(depth,nPlanes)
return baz(nPlanes)
......@@ -5,6 +5,7 @@
# LICENSE file in the root directory of this source tree.
import torch, torch.utils.checkpoint
from .utils import checkpoint101
class Sequential(torch.nn.Sequential):
def input_spatial_size(self, out_size):
......@@ -12,6 +13,14 @@ class Sequential(torch.nn.Sequential):
out_size = self._modules[m].input_spatial_size(out_size)
return out_size
def __add__(self, x):
    """Return a new Sequential holding this container's modules followed by
    all modules of `x` (another iterable of modules). Neither operand is
    modified; modules are shared, not copied."""
    combined = Sequential()
    for module in tuple(self) + tuple(x):
        combined.append(module)
    return combined
def add(self, module):
    """Append `module` under the next sequential integer key and return
    self, so calls can be chained fluently (m.add(a).add(b))."""
    next_key = str(len(self._modules))
    self._modules[next_key] = module
    return self
......
......@@ -127,7 +127,7 @@ def batch_location_tensors(location_tensors):
def prepare_BLInput(l,f):
with torch.no_grad():
n=max([x.size(0) for x in l])
L=torch.empty(len(l),n,l[0].size(1)).fill_(-1)
L=torch.empty(len(l),n,l[0].size(1),dtype=torch.int64).fill_(-1)
F=torch.zeros(len(l),n,f[0].size(1))
for i, (ll, ff) in enumerate(zip(l,f)):
L[i,:ll.size(0),:].copy_(ll)
......@@ -156,6 +156,9 @@ def checkpoint_restore(model,exp_name,name2,use_cuda=True,epoch=0):
def is_power2(num):
    """Return True iff `num` is a positive power of two.

    Uses the classic bit trick: a power of two has exactly one set bit,
    so clearing the lowest set bit (num & (num - 1)) yields zero. The
    explicit zero guard is needed because 0 & -1 == 0 as well."""
    if num == 0:
        return False
    return (num & (num - 1)) == 0
def is_square(num):
    """Return True iff `num` is a perfect square (num >= 0 assumed).

    Rounds the float square root to the nearest integer and squares it
    back, which is exact for the moderate sizes used here."""
    root = int(num ** 0.5 + 0.5)
    return root * root == num
def has_only_one_nonzero_digit(num): #https://oeis.org/A037124
    """Return True iff `num` is a positive integer of the form d * 10**k
    with d in 1..9, i.e. its decimal expansion has exactly one nonzero
    digit (OEIS A037124: 1, 2, ..., 9, 10, 20, ..., 90, 100, ...).

    The previous float-based test (num / 10**floor(log10(num))).is_integer()
    loses precision for large integers and raised ValueError for num <= 0;
    the exact string test below avoids both problems. Non-positive inputs
    now return False instead of raising.
    """
    if num <= 0:
        return False
    # After stripping trailing zeros, exactly one digit must remain.
    return len(str(num).rstrip('0')) == 1
......@@ -291,9 +294,37 @@ def matplotlib_planes(ax, positions,colors):
pass
ax.set_axis_off()
def visdom_scatter(vis, xyz, rgb, win='3d', markersize=3, title=''):
    """Draw a colored 3-d scatter plot in a visdom window.

    Parameters:
        vis : a visdom.Visdom-like object providing .scatter()
        xyz : (N, 3) tensor of point coordinates
        rgb : (N, C) tensor of per-point colors on an arbitrary scale;
              rescaled here to integers in [0, 255]
        win : visdom window id (default '3d')
        markersize : point size (default 3)
        title : plot title (default '')
    """
    # Copy before normalising: .detach() alone returns a view that shares
    # storage, so the in-place -= and /= below would clobber the caller's
    # tensor.
    rgb = rgb.detach().clone()
    rgb -= rgb.min()
    rgb /= rgb.max() / 255 + 1e-10  # epsilon guards against divide-by-zero
    rgb = rgb.floor().cpu().numpy()
    vis.scatter(
        xyz.detach().cpu().numpy(),
        opts={'markersize': markersize, 'markercolor': rgb, 'title': title},
        win=win)
def ply_scatter(name, xyz, rgb):
    """Save a colored point cloud to an ASCII PLY file.

    Parameters:
        name : output path without extension; '.ply' is appended
        xyz : (N, 3) tensor of point coordinates
        rgb : (N, 3) tensor of per-point colors on an arbitrary scale;
              rescaled here to integers in [0, 255]
    """
    # Copy before normalising: .detach() alone returns a view that shares
    # storage, so the in-place -= and /= below would clobber the caller's
    # tensor.
    rgb = rgb.detach().clone()
    rgb -= rgb.min()
    rgb /= rgb.max() / 255 + 1e-10  # epsilon guards against divide-by-zero
    rgb = rgb.floor().cpu().numpy()
    with open(name + '.ply', 'w') as f:
        print("""ply
format ascii 1.0
element vertex %d
property float x
property float y
property float z
property uchar red
property uchar green
property uchar blue
end_header""" % (xyz.size(0)), file=f)
        # NOTE(review): coordinates are written with %d although the header
        # declares float x/y/z — assumes integer voxel coordinates; confirm.
        for (x, y, z), (r, g, b) in zip(xyz, rgb):
            print('%d %d %d %d %d %d' % (x, y, z, r, g, b), file=f)
class VerboseIdentity(torch.nn.Module):
    """Debugging pass-through layer: prints its input and returns it
    unchanged. Drop into a network to inspect intermediate values."""

    def forward(self, x):
        # Side effect only — the value itself is passed through untouched.
        print(x)
        return x
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment