Commit 9c865087 authored by Benjamin Thomas Graham's avatar Benjamin Thomas Graham
Browse files

non-legacy PyTorch

parent 81d65180
@@ -5,34 +5,38 @@
# LICENSE file in the root directory of this source tree. # LICENSE file in the root directory of this source tree.
import torch import torch
import torch.legacy.nn as nn import torch.nn as nn
import sparseconvnet.legacy as scn import sparseconvnet as scn
from data import getIterators from data import getIterators
# Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'
# two-dimensional SparseConvNet # two-dimensional SparseConvNet
model = nn.Sequential() class Model(nn.Module):
sparseModel = scn.Sequential() def __init__(self):
denseModel = nn.Sequential() nn.Module.__init__(self)
model.add(sparseModel).add(denseModel) self.sparseModel = scn.SparseVggNet(2, 3, [
sparseModel.add(scn.SparseVggNet(2, 3, [ ['C', 8, ], ['C', 8], 'MP',
['C', 8, ], ['C', 8], 'MP', ['C', 16], ['C', 16], 'MP',
['C', 16], ['C', 16], 'MP', ['C', 16, 8], ['C', 16, 8], 'MP',
['C', 16, 8], ['C', 16, 8], 'MP', ['C', 24, 8], ['C', 24, 8], 'MP']
['C', 24, 8], ['C', 24, 8], 'MP'])) ).add(scn.Convolution(2, 32, 64, 5, 1, False)
sparseModel.add(scn.Convolution(2, 32, 64, 5, 1, False)) ).add(scn.BatchNormReLU(64)
sparseModel.add(scn.BatchNormReLU(64)) ).add(scn.SparseToDense(2,64))
sparseModel.add(scn.SparseToDense(2)) self.linear = nn.Linear(64, 183)
denseModel.add(nn.View(-1, 64)) def forward(self, x):
denseModel.add(nn.Linear(64, 183)) x = self.sparseModel(x)
model.type(dtype) x = x.view(-1,64)
print(model) x = self.linear(x)
return x
spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1])) model=Model()
print('input spatial size', spatial_size) spatial_size = model.sparseModel.input_spatial_size(torch.LongTensor([1, 1]))
print('Input spatial size:', spatial_size)
dataset = getIterators(spatial_size, 63, 3) dataset = getIterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate( scn.ClassificationTrainValidate(
model, dataset, model, dataset,
{'nEpochs': 100, 'initial_LR': 0.1, 'LR_decay': 0.05, 'weightDecay': 1e-4, 'checkPoint': False}) {'n_epochs': 100,
'initial_lr': 0.1,
'lr_decay': 0.05,
'weight_decay': 1e-4,
'use_gpu': torch.cuda.is_available(),
'check_point': True,})
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Example: build and train a two-dimensional SparseConvNet classifier using
# the legacy (Lua-torch style) container API.
# NOTE(review): `torch.legacy.nn` / `sparseconvnet.legacy` were removed in
# later PyTorch / SparseConvNet releases -- this script only runs on old
# (pre-1.0) PyTorch; confirm the installed versions before use.
import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn
from data import getIterators
# Use the GPU if there is one, otherwise CPU
# (the legacy API selects the device via a tensor-type string given to .type())
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'
# two-dimensional SparseConvNet
# Overall structure: a sparse convolutional front end feeding a dense
# classifier head, chained inside one top-level Sequential container.
model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
# VGG-style sparse stack: ['C', n] entries are convolution layers and 'MP'
# entries are pooling/downsample steps -- presumably per SparseVggNet's
# layer-spec format; TODO(review) confirm the meaning of the optional third
# element (e.g. ['C', 16, 8]) against the sparseconvnet documentation.
sparseModel.add(scn.SparseVggNet(2, 3, [
    ['C', 8, ], ['C', 8], 'MP',
    ['C', 16], ['C', 16], 'MP',
    ['C', 16, 8], ['C', 16, 8], 'MP',
    ['C', 24, 8], ['C', 24, 8], 'MP']))
# Final sparse convolution: 32 -> 64 feature planes; remaining arguments are
# presumably filter size 5, stride 1, bias=False -- verify the argument order
# against scn.Convolution's signature.
sparseModel.add(scn.Convolution(2, 32, 64, 5, 1, False))
sparseModel.add(scn.BatchNormReLU(64))
# Convert the sparse representation to a dense tensor for the dense head.
sparseModel.add(scn.SparseToDense(2))
# Dense head: flatten to rows of 64 features, then a linear classifier with
# 183 outputs (presumably the dataset's class count -- see data.py).
denseModel.add(nn.View(-1, 64))
denseModel.add(nn.Linear(64, 183))
# Move the whole model to GPU or CPU via the legacy tensor-type mechanism.
model.type(dtype)
print(model)
# Ask the sparse stack what input spatial size yields a 1x1 output map.
spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
print('input spatial size', spatial_size)
# getIterators(spatial_size, 63, 3): the meaning of the trailing arguments is
# not visible here -- presumably batch-size/scale parameters; see data.py.
dataset = getIterators(spatial_size, 63, 3)
# Run the training/validation loop with the given hyper-parameters.
scn.ClassificationTrainValidate(
    model, dataset,
    {'nEpochs': 100, 'initial_LR': 0.1, 'LR_decay': 0.05, 'weightDecay': 1e-4, 'checkPoint': False})
@@ -5,11 +5,10 @@
# LICENSE file in the root directory of this source tree. # LICENSE file in the root directory of this source tree.
import torch import torch
import torch.legacy.nn as nn import sparseconvnet as scn
import sparseconvnet.legacy as scn
# Use the GPU if there is one, otherwise CPU # Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor' use_gpu = torch.cuda.is_available()
model = scn.Sequential().add( model = scn.Sequential().add(
scn.SparseVggNet(2, 1, scn.SparseVggNet(2, 1,
@@ -21,11 +20,13 @@ model = scn.Sequential().add(
).add( ).add(
scn.BatchNormReLU(32) scn.BatchNormReLU(32)
).add( ).add(
scn.SparseToDense(2) scn.SparseToDense(2,32)
).type(dtype) )
if use_gpu:
model.cuda()
# output will be 10x10 # output will be 10x10
inputSpatialSize = model.suggestInputSize(torch.LongTensor([10, 10])) inputSpatialSize = model.input_spatial_size(torch.LongTensor([10, 10]))
input = scn.InputBatch(2, inputSpatialSize) input = scn.InputBatch(2, inputSpatialSize)
msg = [ msg = [
@@ -65,10 +66,11 @@ input.setLocations(locations, features, 0)
# 2 if using MP2 pooling for downsizing. # 2 if using MP2 pooling for downsizing.
input.precomputeMetadata(3) input.precomputeMetadata(3)
model.evaluate() model.train()
input.type(dtype) if use_gpu:
input.cuda()
output = model.forward(input) output = model.forward(input)
# Output is 2x32x10x10: our minibatch has 2 samples, the network has 32 output # Output is 2x32x10x10: our minibatch has 2 samples, the network has 32 output
# feature planes, and 10x10 is the spatial size of the output. # feature planes, and 10x10 is the spatial size of the output.
print(output.size(), output.type()) print(output.size(), output.data.type())
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment