Commit 159e5f9a authored by Benjamin Thomas Graham's avatar Benjamin Thomas Graham
Browse files

example

parent aa071d62
...@@ -32,6 +32,7 @@ model = scn.Sequential().add( ...@@ -32,6 +32,7 @@ model = scn.Sequential().add(
# output will be 10x10 # output will be 10x10
inputSpatialSize = model.input_spatial_size(torch.LongTensor([10, 10])) inputSpatialSize = model.input_spatial_size(torch.LongTensor([10, 10]))
input_layer = scn.InputLayer(2, inputSpatialSize) input_layer = scn.InputLayer(2, inputSpatialSize)
bl_input_layer = scn.BLInputLayer(2, inputSpatialSize)
msgs = [[" X X XXX X X XX X X XX XXX X XXX ", msgs = [[" X X XXX X X XX X X XX XXX X XXX ",
" X X X X X X X X X X X X X X X X ", " X X X X X X X X X X X X X X X X ",
...@@ -46,7 +47,7 @@ msgs = [[" X X XXX X X XX X X XX XXX X XXX ", ...@@ -46,7 +47,7 @@ msgs = [[" X X XXX X X XX X X XX XXX X XXX ",
" X X XXXX x x x x xxxx x ",]] " X X XXXX x x x x xxxx x ",]]
# Create Nx3 and Nx1 vectors to encode the messages above: # Create Nx3 and Nx1 vectors to encode the messages above using InputLayer:
locations = [] locations = []
features = [] features = []
for batchIdx, msg in enumerate(msgs): for batchIdx, msg in enumerate(msgs):
...@@ -65,3 +66,30 @@ output = model(input) ...@@ -65,3 +66,30 @@ output = model(input)
# Output is 2x32x10x10: our minibatch has 2 samples, the network has 32 output # Output is 2x32x10x10: our minibatch has 2 samples, the network has 32 output
# feature planes, and 10x10 is the spatial size of the output. # feature planes, and 10x10 is the spatial size of the output.
print('Output SparseConvNetTensor:', output) print('Output SparseConvNetTensor:', output)
# Alternatively:
# Create Nx3 and Nx1 vectors to encode the messages above using BLInputLayer:
batch = []
for msg in msgs:
    # Collect the coordinates of every 'X' cell in this sample, row-major.
    coords = [[row, col]
              for row, text in enumerate(msg)
              for col, ch in enumerate(text)
              if ch == 'X']
    # One single-channel feature (value 1) per active location.
    feats = [[1] for _ in coords]
    batch.append([torch.LongTensor(coords), torch.FloatTensor(feats)])
# Pad the per-sample (locations, features) pairs into dense batch tensors.
batch = scn.prepare_BLInput(batch)
batch[1] = batch[1].to(device)
input = bl_input_layer(batch)
print('Input SparseConvNetTensor:', input)
output = model(input)
# Output is 2x32x10x10: our minibatch has 2 samples, the network has 32 output
# feature planes, and 10x10 is the spatial size of the output.
print('Output SparseConvNetTensor:', output)
This diff is collapsed.
...@@ -124,14 +124,15 @@ def batch_location_tensors(location_tensors): ...@@ -124,14 +124,15 @@ def batch_location_tensors(location_tensors):
a.append(pad_with_batch_idx(lt,batch_idx)) a.append(pad_with_batch_idx(lt,batch_idx))
return torch.cat(a,0) return torch.cat(a,0)
def prepare_BLInput(batch):
    """Pad a batch of variable-length sparse samples into dense tensors.

    Args:
        batch: non-empty list of ``[locations, features]`` pairs, where for
            each sample ``locations`` is a LongTensor of shape (n_i, d) and
            ``features`` is a FloatTensor of shape (n_i, c). All samples must
            share the same d and c.

    Returns:
        ``[L, F]`` where ``L`` is int64 of shape (len(batch), n_max, d) padded
        with -1 (the "inactive site" marker consumed by BLInputLayer) and
        ``F`` is float of shape (len(batch), n_max, c) padded with zeros.
    """
    # Pure data marshalling: no gradients should flow through the padding.
    with torch.no_grad():
        # Longest sample determines the padded length.
        n = max(locations.size(0) for locations, _ in batch)
        first_locations, first_features = batch[0]
        # -1 marks padding rows in the location tensor.
        L = torch.empty(len(batch), n, first_locations.size(1),
                        dtype=torch.int64).fill_(-1)
        # Padding rows carry all-zero features.
        F = torch.zeros(len(batch), n, first_features.size(1))
        for i, (locations, features) in enumerate(batch):
            L[i, :locations.size(0), :].copy_(locations)
            F[i, :features.size(0), :].copy_(features)
        return [L, F]
def checkpoint_restore(model,exp_name,name2,use_cuda=True,epoch=0): def checkpoint_restore(model,exp_name,name2,use_cuda=True,epoch=0):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment