# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import torch
import sparseconvnet as scn

# Select the compute device: prefer the GPU when both PyTorch and the
# sparseconvnet build support CUDA, otherwise fall back to the CPU.
use_cuda = torch.cuda.is_available() and scn.SCN.is_cuda_build()
device = 'cuda:0' if use_cuda else 'cpu'
print("Using CUDA." if use_cuda else "Not using CUDA.")

# A small VGG-style sparse CNN for 2D inputs with one input feature plane:
# three conv/conv/maxpool stages (8, 16, 24 planes), a submanifold 3x3
# convolution to 32 planes (no bias), batch-norm + ReLU, and finally a
# SparseToDense layer so the network emits an ordinary dense tensor.
model = scn.Sequential().add(
    scn.SparseVggNet(2, 1,
                     [['C', 8], ['C', 8], ['MP', 3, 2],
                      ['C', 16], ['C', 16], ['MP', 3, 2],
                      ['C', 24], ['C', 24], ['MP', 3, 2]])
).add(
    scn.SubmanifoldConvolution(2, 24, 32, 3, False)
).add(
    scn.BatchNormReLU(32)
).add(
    scn.SparseToDense(2, 32)
).to(device)

# We want a 10x10 output; ask the model which input spatial size it needs
# (the three stride-2 max-pools downsample the input).
inputSpatialSize = model.input_spatial_size(torch.LongTensor([10, 10]))
# Two equivalent ways of feeding sparse data into the network:
input_layer = scn.InputLayer(2, inputSpatialSize)
bl_input_layer = scn.BLInputLayer(2, inputSpatialSize)

# Two "messages" drawn as ASCII art, one per minibatch sample. Only capital
# 'X' cells become active sites below; the lowercase 'x' characters in the
# second message are deliberately left inactive.
msgs = [[" X   X  XXX  X    X    XX     X       X   XX   XXX   X    XXX   ",
         " X   X  X    X    X   X  X    X       X  X  X  X  X  X    X  X  ",
         " XXXXX  XX   X    X   X  X    X   X   X  X  X  XXX   X    X   X ",
         " X   X  X    X    X   X  X     X X X X   X  X  X  X  X    X  X  ",
         " X   X  XXX  XXX  XXX  XX       X   X     XX   X  X  XXX  XXX   "],

        [" XXX              XXXXX      x   x     x  xxxxx  xxx ",
         " X  X  X   XXX       X       x   x x   x  x     x  x ",
         " XXX                X        x   xxxx  x  xxxx   xxx ",
         " X     X   XXX       X       x     x   x      x    x ",
         " X     X          XXXX   x   x     x   x  xxxx     x ",]]
# Create Nx3 and Nx1 vectors to encode the messages above using InputLayer.
# Each active 'X' cell contributes one site: the location row is
# [y, x, batchIdx] (the last coordinate is the sample index within the
# minibatch) and the feature row is a single 1.
locations = []
features = []
for batchIdx, msg in enumerate(msgs):
    for y, line in enumerate(msg):
        for x, c in enumerate(line):
            if c == 'X':
                locations.append([y, x, batchIdx])
                features.append([1])
locations = torch.LongTensor(locations)
features = torch.FloatTensor(features).to(device)

# Run the sparse input through the network via InputLayer.
input = input_layer([locations, features])
print('Input SparseConvNetTensor:', input)
output = model(input)

# Output is 2x32x10x10: our minibatch has 2 samples, the network has 32 output
# feature planes, and 10x10 is the spatial size of the output.
print('Output SparseConvNetTensor:', output)




# Alternatively:
# Create per-sample Nx2 location and Nx1 feature tensors to encode the
# messages above using BLInputLayer (the batch index is implicit in the
# list position; prepare_BLInput pads and stacks the samples).
batch = []
for batchIdx, msg in enumerate(msgs):
    locs, feats = [], []
    for y, line in enumerate(msg):
        for x, c in enumerate(line):
            if c == 'X':
                locs.append([y, x])   # Locations
                feats.append([1])     # Features
    batch.append([torch.LongTensor(locs), torch.FloatTensor(feats)])
batch = scn.prepare_BLInput(batch)
batch[1] = batch[1].to(device)

input = bl_input_layer(batch)
print('Input SparseConvNetTensor:', input)
output = model(input)

# Output is 2x32x10x10: our minibatch has 2 samples, the network has 32 output
# feature planes, and 10x10 is the spatial size of the output.
print('Output SparseConvNetTensor:', output)