Commit 54c58b5f authored by Benjamin Thomas Graham's avatar Benjamin Thomas Graham
Browse files

minor

parent dcd1428d
import numpy as np
import torch
import glob, math, os
import scipy.io
import h5py
import pickle
# The 40 consolidated Silberman / NYUv2-40 semantic class names.
# A 1-based label value v (as stored in
# corresponding_classes_in_Silberman_labeling) names classes[v - 1].
classes = [
'wall', 'floor', 'cabinet', 'bed',
'chair', 'sofa', 'table', 'door',
'window', 'bookshelf', 'picture', 'counter',
'blinds', 'desk', 'shelves', 'curtain',
'dresser', 'pillow', 'mirror', 'floor mat',
'clothes', 'ceiling', 'books', 'refridgerator',
'television', 'paper', 'towel', 'shower curtain',
'box', 'whiteboard', 'person', 'night stand',
'toilet', 'sink', 'lamp', 'bathtub',
'bag', 'otherstructure', 'otherfurniture', 'otherprop']
# Lookup table from raw NYUv2 label id (0-based index into this list, i.e.
# the stored label minus one) to one of the 40 consolidated Silberman
# classes (1-based value; subtract 1 before indexing `classes`).
# NOTE(review): presumably one entry per raw NYU label id in the dataset —
# the print below shows its length next to len(classes); confirm against
# the label count in nyu_depth_v2_labeled.mat.
corresponding_classes_in_Silberman_labeling = [40, 40, 3, 22, 5, 40, 12, 38, 40, 40, 2, 39, 40, 40, 26, 40, 24,
40, 7, 40, 1, 40, 40, 34, 38, 29, 40, 8, 40, 40, 40, 40, 38, 40,
40, 14, 40, 38, 40, 40, 40, 15, 39, 40, 30, 40, 40, 39, 40, 39, 38,
40, 38, 40, 37, 40, 38, 38, 9, 40, 40, 38, 40, 11, 38, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 13, 40, 40, 6, 40, 23,
40, 39, 10, 16, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40,
40, 38, 40, 39, 40, 40, 40, 40, 39, 38, 40, 40, 40, 40, 40, 40, 18,
40, 40, 19, 28, 33, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 27, 36,
40, 40, 40, 40, 21, 40, 20, 35, 40, 40, 40, 40, 40, 40, 40, 40, 38,
40, 40, 40, 4, 32, 40, 40, 39, 40, 39, 40, 40, 40, 40, 40, 17, 40,
40, 25, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39,
40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 38, 38, 40, 40, 39, 40, 39,
40, 38, 39, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 38,
40, 40, 38, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
38, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 39, 40, 40, 40, 38, 40, 40, 39, 40, 40, 38, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 31, 40, 40, 40, 40, 40, 40, 40, 38, 40,
40, 38, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 40, 39, 40,
40, 39, 40, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 38, 39, 40,
40, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
38, 39, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 38,
40, 40, 40, 38, 40, 39, 40, 40, 40, 39, 39, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
39, 39, 40, 40, 39, 39, 40, 40, 40, 40, 38, 40, 40, 38, 39, 39, 40,
39, 40, 39, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40,
38, 40, 39, 40, 40, 40, 40, 40, 39, 39, 40, 40, 40, 40, 40, 40, 39,
39, 40, 40, 38, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 39,
40, 40, 40, 40, 39, 40, 40, 40, 40, 40, 39, 40, 40, 39, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 38, 40, 40, 40,
40, 40, 40, 40, 39, 38, 39, 40, 38, 39, 40, 39, 40, 39, 40, 40, 40,
40, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 38, 40, 40, 39, 40, 40,
40, 39, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 38, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 38, 40, 40, 38,
40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 40, 40, 38, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38, 38, 38, 40, 40, 40, 38,
40, 40, 40, 38, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 38, 40, 38, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 39, 40, 39, 40, 40, 40, 40, 38, 38, 40, 40, 40, 38,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40,
39, 40, 40, 39, 39, 40, 40, 40, 40, 40, 40, 40, 40, 39, 39, 39, 40,
40, 40, 40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 40, 40,
40, 39, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 40, 38,
40, 40, 40, 40, 40, 40, 40, 39, 40, 40, 38, 40, 39, 40, 40, 40, 40,
38, 40, 40, 40, 40, 40, 38, 40, 40, 40, 40, 40, 40, 40, 39, 40, 40,
40, 40, 40, 40, 40, 40, 40, 39, 40, 40]
print(len(classes), len(corresponding_classes_in_Silberman_labeling))
# Load the official NYUv2 test split; 'testNdxs' is 1-based in the .mat
# file, so subtract 1 to get 0-based frame indices.
split = scipy.io.loadmat('splits.mat')['testNdxs'] - 1  # 0-index
# Membership tests against a numpy array are O(n) per lookup; build a set
# once so the two comprehensions below run in O(1449) instead of O(1449*n).
_test_set = set(int(v) for v in split.flatten())
testIdxs = [x for x in range(1449) if x in _test_set]
trainIdxs = [x for x in range(1449) if x not in _test_set]
print(len(trainIdxs), len(testIdxs))
# Export each training frame as a sparse point cloud pickle:
# coords = (x, y, depth) per labelled pixel, col = RGB (with a leading 255
# flag byte), cl = 0-based class after remapping through the 40-class
# Silberman labelling.  The h5py file and each pickle are now closed via
# context managers (the original leaked both handles), and the inner loop
# no longer shadows the outer frame-index variable.
with h5py.File('nyu_depth_v2_labeled.mat', 'r') as f:
    for i, idx in enumerate(trainIdxs):
        tc = f.get('images')[idx]        # colour image; indexed tc[channel, px, py]
        td = f.get('depths')[idx] * 100  # depth scaled by 100 (presumably metres -> cm; confirm)
        td -= td.mean()                  # centre the depth values around zero
        print(td.std())
        # Stored labels are 1-based; shift so that -1 marks unlabelled pixels.
        gt = np.array(f.get('labels')[idx], dtype='int16') - 1
        coords = []
        col = []
        cl = []
        # Crop a fixed border (presumably unreliable sensor margin — confirm)
        # and keep only labelled pixels.
        for px in range(40, 600):
            for py in range(45, 470):
                if gt[px, py] >= 0:
                    cl.append(corresponding_classes_in_Silberman_labeling[gt[px, py]] - 1)
                    coords.append([px - 320, py - 240, td[px, py]])
                    col.append([255, tc[0, px, py], tc[1, px, py], tc[2, px, py]])
        coords = np.array(coords, dtype='int16')  # truncates the float depth
        col = np.array(col, dtype='uint8')
        cl = np.array(cl, dtype='int8')
        print(coords.shape, col.shape, cl.shape)
        with open('train' + str(i) + '.pickle', 'wb') as out:
            pickle.dump([coords, col, cl], out, protocol=pickle.HIGHEST_PROTOCOL)
# Export each test frame exactly as the training frames above, but into
# 'test<i>.pickle'.  File handles are closed via context managers (the
# original leaked both the h5py file and every pickle file), and the inner
# pixel loop no longer shadows the outer frame-index variable.
with h5py.File('nyu_depth_v2_labeled.mat', 'r') as f:
    for i, idx in enumerate(testIdxs):
        tc = f.get('images')[idx]        # colour image; indexed tc[channel, px, py]
        td = f.get('depths')[idx] * 100  # depth scaled by 100 (presumably metres -> cm; confirm)
        td -= td.mean()                  # centre the depth values around zero
        print(td.std())
        # Stored labels are 1-based; shift so that -1 marks unlabelled pixels.
        gt = np.array(f.get('labels')[idx], dtype='int16') - 1
        coords = []
        col = []
        cl = []
        # Crop a fixed border (presumably unreliable sensor margin — confirm)
        # and keep only labelled pixels.
        for px in range(40, 600):
            for py in range(45, 470):
                if gt[px, py] >= 0:
                    cl.append(corresponding_classes_in_Silberman_labeling[gt[px, py]] - 1)
                    coords.append([px - 320, py - 240, td[px, py]])
                    col.append([255, tc[0, px, py], tc[1, px, py], tc[2, px, py]])
        coords = np.array(coords, dtype='int16')  # truncates the float depth
        col = np.array(col, dtype='uint8')
        cl = np.array(cl, dtype='int8')
        print(coords.shape, col.shape, cl.shape)
        with open('test' + str(i) + '.pickle', 'wb') as out:
            pickle.dump([coords, col, cl], out, protocol=pickle.HIGHEST_PROTOCOL)
......@@ -24,8 +24,9 @@ __global__ void AveragePooling_fp(T *input_features, T *output_features,
Int i = r[2 * threadIdx.y] * input_stride;
Int o = r[2 * threadIdx.y + 1] * output_stride;
for (Int plane = threadIdx.x; plane < nPlanes; plane += NTX)
atomicAdd(&output_features[o + plane],
alpha * input_features[i + plane]);
output_features[o + plane]+= alpha * input_features[i + plane];
// atomicAdd(&output_features[o + plane],
// alpha * input_features[i + plane]);
}
__syncthreads();
}
......
......@@ -256,15 +256,14 @@ void Metadata<dimension>::appendMetadata(Metadata<dimension> &mAdd,
template <Int dimension>
std::vector<at::Tensor>
Metadata<dimension>::sparsifyCompare(Metadata<dimension> &mReference,
Metadata<dimension> &mSparsified,
/*long*/ at::Tensor spatialSize) {
auto p = LongTensorToPoint<dimension>(spatialSize);
at::Tensor delta = torch::zeros({nActive[p]}, at::kFloat);
at::Tensor gt = torch::zeros({nActive[p]}, at::kByte);
at::Tensor ref_map = torch::empty({mReference.nActive[p]}, at::kLong);
float *deltaPtr = delta.data<float>();
auto gtPtr = (signed char *)gt.data_ptr(); //<signed char>();
// auto gtPtr = gt.data<signed char>();
auto &sgsReference = mReference.grids[p];
auto &sgsFull = grids[p];
auto &sgsSparsified = mSparsified.grids[p];
Int batchSize = sgsFull.size();
Int sample;
......@@ -272,20 +271,16 @@ Metadata<dimension>::sparsifyCompare(Metadata<dimension> &mReference,
for (sample = 0; sample < (Int)batchSize; ++sample) {
auto &sgReference = sgsReference[sample];
auto &sgFull = sgsFull[sample];
auto &sgSparsified = sgsSparsified[sample];
for (auto const &iter : sgFull.mp) {
bool gt = sgReference.mp.find(iter.first) != sgReference.mp.end();
bool hot = sgSparsified.mp.find(iter.first) != sgSparsified.mp.end();
if (gt)
bool gt_ = sgReference.mp.find(iter.first) != sgReference.mp.end();
if (gt_) {
ref_map[sgReference.mp[iter.first] + sgReference.ctr] =
iter.second + sgFull.ctr;
if (gt and not hot)
deltaPtr[iter.second + sgFull.ctr] = -1;
if (hot and not gt)
deltaPtr[iter.second + sgFull.ctr] = +1;
gtPtr[iter.second + sgFull.ctr] = +1;
}
}
return {delta, ref_map};
}
return {gt, ref_map};
}
// tensor is size[0] x .. x size[dimension-1] x size[dimension]
......
......@@ -104,8 +104,13 @@ public:
void appendMetadata(Metadata<dimension> &mAdd,
/*long*/ at::Tensor spatialSize);
/* std::vector<at::Tensor> sparsifyCompare(Metadata<dimension> &mReference, */
/* Metadata<dimension> &mSparsified,
*/
/* /\*long*\/ at::Tensor spatialSize);
*/
std::vector<at::Tensor> sparsifyCompare(Metadata<dimension> &mReference,
Metadata<dimension> &mSparsified,
/*long*/ at::Tensor spatialSize);
// tensor is size[0] x .. x size[dimension-1] x size[dimension]
......
......@@ -200,7 +200,7 @@ def SparseResNet(dimension, nInputPlanes, layers):
return m
def UNet(dimension, reps, nPlanes, residual_blocks=False, downsample=[2, 2], leakiness=0):
def UNet(dimension, reps, nPlanes, residual_blocks=False, downsample=[2, 2], leakiness=0, n_input_planes=-1):
"""
U-Net style network with VGG or ResNet-style blocks.
For voxel level prediction:
......@@ -218,6 +218,8 @@ def UNet(dimension, reps, nPlanes, residual_blocks=False, downsample=[2, 2], lea
x=self.linear(x)
return x
"""
if n_input_planes==-1:
n_input_planes=nPlanes[0]
def block(m, a, b):
if residual_blocks: #ResNet style blocks
m.add(scn.ConcatTable()
......@@ -234,13 +236,9 @@ def UNet(dimension, reps, nPlanes, residual_blocks=False, downsample=[2, 2], lea
.add(scn.SubmanifoldConvolution(dimension, a, b, 3, False)))
def U(nPlanes): #Recursive function
m = scn.Sequential()
if len(nPlanes) == 1:
for _ in range(reps):
block(m, nPlanes[0], nPlanes[0])
else:
m = scn.Sequential()
for _ in range(reps):
block(m, nPlanes[0], nPlanes[0])
for i in range(reps):
block(m, n_input_planes if i==0 else nPlanes[0], nPlanes[0])
if len(nPlanes) > 1:
m.add(
scn.ConcatTable().add(
scn.Identity()).add(
......
......@@ -4,14 +4,20 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import sparseconvnet
from torch.autograd import Function, Variable
from torch.nn import Module, Parameter
from torch.nn import Module
import sparseconvnet
from .utils import *
from .sparseConvNetTensor import SparseConvNetTensor
from .metadata import Metadata
from .sequential import Sequential
from .activations import Sigmoid
from .networkInNetwork import NetworkInNetwork
class Sparsify(Module):
class SparsifyFCS(Module):
"""
Sparsify by looking at the first feature channel's sign.
"""
def __init__(self, dimension):
Module.__init__(self)
self.dimension = dimension
......@@ -31,3 +37,65 @@ class Sparsify(Module):
return output
else:
return input
class FakeGradHardSigmoidFunction(torch.autograd.Function):
    """Hard threshold in the forward pass with a straight-through gradient.

    forward : y = 1.0 where x > 0, else 0.0
    backward: the incoming gradient is returned unchanged (identity),
              i.e. the non-differentiable threshold is treated as if it
              were the identity function.
    """

    @staticmethod
    def forward(ctx, x):
        # Binarise the input.  Nothing needs to be saved for backward:
        # the straight-through gradient does not depend on x.
        with torch.no_grad():
            return (x > 0).float()

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: pass the gradient through unchanged.
        # The original code also computed a hard-sigmoid surrogate
        # (df = 0.25 * 1[-2 < x < 2]) after this return statement, but that
        # code was unreachable; it has been removed as dead code.
        return grad_output
class FakeGradHardSigmoid(Module):
    """Module wrapper around FakeGradHardSigmoidFunction.

    Binarises the feature tensor of a SparseConvNetTensor; the metadata
    and spatial size are shared with the input (the set of active sites
    is unchanged).
    """

    def forward(self, input):
        result = SparseConvNetTensor()
        result.metadata = input.metadata
        result.spatial_size = input.spatial_size
        result.features = FakeGradHardSigmoidFunction.apply(input.features)
        return result
class Sparsify(Module):
    """Learn a per-site keep/drop mask and sparsify the input accordingly.

    A 1x1 NetworkInNetwork scores each active site; sites whose score
    exceeds the threshold are kept and new metadata is built for them.
    With activation='sigmoid' or 'fakeGradHardSigmoid' the score is
    squashed and thresholded at 0.5; with no activation the raw score is
    thresholded at 0.
    """

    def __init__(self, dimension, nIn, activation=None):
        Module.__init__(self)
        self.dimension = dimension
        self.activation = activation
        scorer = NetworkInNetwork(nIn, 1, True)
        if activation == 'fakeGradHardSigmoid':
            self.net = Sequential(scorer, FakeGradHardSigmoid())
        elif activation == 'sigmoid':
            self.net = Sequential(scorer, Sigmoid())
        else:
            self.net = scorer

    def forward(self, input):
        if not input.features.numel():
            # Nothing active: tag the tensor and hand it back untouched.
            input.mask = None
            return input
        scores = self.net(input).features.view(-1)
        # Squashed scores lie in [0, 1] -> threshold 0.5; raw scores -> 0.
        keep = scores > (0.5 if self.activation else 0)
        output = SparseConvNetTensor()
        output.spatial_size = input.spatial_size
        output.metadata = Metadata(self.dimension)
        output.mask = scores
        output.features = input.features[keep]
        keep_cpu = keep.cpu()
        # Rebuild the metadata for the surviving sites; the cumulative sum
        # gives each kept site its new (compacted) row index.
        input.metadata.sparsifyMetadata(
            output.metadata,
            input.spatial_size,
            keep_cpu.byte(),
            keep_cpu.long().cumsum(0))
        return output
......@@ -18,7 +18,7 @@ class JoinTable(torch.nn.Sequential):
output = SparseConvNetTensor()
output.metadata = input[0].metadata
output.spatial_size = input[0].spatial_size
output.features = torch.cat([i.features for i in input], 1)
output.features = torch.cat([i.features for i in input], 1) if input[0].features.numel() else input[0].features
return output
def input_spatial_size(self, out_size):
......
......@@ -145,6 +145,8 @@ def checkpoint_restore(model,exp_name,name2,use_cuda=True,epoch=0):
def is_power2(num):
    """Return True iff *num* is a positive power of two."""
    # A power of two has exactly one set bit, so clearing the lowest set
    # bit (num & (num - 1)) leaves zero.  Zero and negatives never qualify.
    return num > 0 and num & (num - 1) == 0
def has_only_one_nonzero_digit(num):  # https://oeis.org/A037124
    """Return True iff *num* is a positive integer with one nonzero digit.

    I.e. num == d * 10**k for some digit d in 1..9 (e.g. 7, 10, 900).
    The previous float-based test (num / 10**floor(log10(num))) broke for
    large integers — e.g. 3 * 10**17 was rejected because the division is
    not exact in double precision.  A decimal-string test is exact, and
    non-positive inputs now return False instead of raising from math.log.
    """
    s = str(num)
    # Positive, and every digit after the first is '0'.
    return num > 0 and set(s[1:]) <= {'0'}
def checkpoint_save(model,exp_name,name2,epoch, use_cuda=True):
f=exp_name+'-%09d-'%epoch+name2+'.pth'
model.cpu()
......@@ -157,3 +159,6 @@ def checkpoint_save(model,exp_name,name2,epoch, use_cuda=True):
if os.path.isfile(f):
if not is_power2(epoch):
os.remove(f)
def random_rotation(dimension=3):
    """Return a random dimension x dimension orthogonal matrix.

    Uses torch.linalg.qr (torch.qr is deprecated) on a Gaussian matrix.
    The Q factor of a plain QR decomposition is not uniformly (Haar)
    distributed over the orthogonal group; multiplying each column by the
    sign of the corresponding diagonal entry of R fixes the bias
    (Mezzadri's construction).  Note the determinant may be +1 or -1, as
    with the original implementation.
    """
    q, r = torch.linalg.qr(torch.randn(dimension, dimension))
    signs = torch.sign(torch.diagonal(r))
    signs[signs == 0] = 1  # guard the measure-zero case of a zero diagonal
    return q * signs       # scales column j of q by signs[j]
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment