Commit 9c865087 authored by Benjamin Thomas Graham's avatar Benjamin Thomas Graham
Browse files

non-legacy PyTorch

parent 81d65180
......@@ -5,34 +5,38 @@
# LICENSE file in the root directory of this source tree.
import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn
import torch.nn as nn
import sparseconvnet as scn
from data import getIterators
# Use the GPU if there is one, otherwise CPU
# two-dimensional SparseConvNet
class Model(nn.Module):
    """2D SparseConvNet classifier: sparse VGG-style trunk + linear head.

    Takes a sparseconvnet input batch, runs it through a sparse
    convolutional trunk, converts to a dense tensor, and classifies
    into 183 classes.
    """

    def __init__(self):
        nn.Module.__init__(self)
        # Sparse trunk: VGG-style blocks over 2 spatial dimensions with 3
        # input feature planes ('C' presumably = convolution, 'MP' = max
        # pooling — confirm against the sparseconvnet docs), followed by a
        # Convolution(dim=2, 32->64, filter 5, stride 1, no bias — TODO
        # confirm argument meaning), BatchNorm+ReLU, and conversion to a
        # dense tensor with 64 feature planes.
        self.sparseModel = scn.SparseVggNet(2, 3, [
            ['C', 8, ], ['C', 8], 'MP',
            ['C', 16], ['C', 16], 'MP',
            ['C', 16, 8], ['C', 16, 8], 'MP',
            ['C', 24, 8], ['C', 24, 8], 'MP']
        ).add(scn.Convolution(2, 32, 64, 5, 1, False)
        ).add(scn.BatchNormReLU(64)
        ).add(scn.SparseToDense(2, 64))
        # Dense head: 64 features -> 183 class scores.
        self.linear = nn.Linear(64, 183)

    def forward(self, x):
        # Sparse trunk -> dense tensor -> flatten to 64 features -> classify.
        x = self.sparseModel(x)
        x = x.view(-1, 64)
        x = self.linear(x)
        return x
model = Model()
# Ask the sparse trunk for the input spatial size that reduces to 1x1 at
# the output (presumably — confirm input_spatial_size semantics against
# the sparseconvnet documentation).
spatial_size = model.sparseModel.input_spatial_size(torch.LongTensor([1, 1]))
print('Input spatial size:', spatial_size)
# NOTE(review): the 63 / 3 arguments' meanings are not visible here —
# check the data.getIterators signature.
dataset = getIterators(spatial_size, 63, 3)
# Training/validation loop supplied by sparseconvnet; GPU use is decided
# at runtime via torch.cuda.is_available().
scn.ClassificationTrainValidate(
    model, dataset,
    {'n_epochs': 100,
     'initial_lr': 0.1,
     'lr_decay': 0.05,
     'weight_decay': 1e-4,
     'use_gpu': torch.cuda.is_available(),
     'check_point': True, })
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# NOTE(review): this is the pre-commit listing, written against the
# removed torch.legacy.nn / sparseconvnet.legacy APIs; kept for reference.
import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn
from data import getIterators
# Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'
# two-dimensional SparseConvNet
# Overall layout: a sparse feature extractor feeding a dense classifier head.
model = nn.Sequential()
sparseModel = scn.Sequential()
denseModel = nn.Sequential()
model.add(sparseModel).add(denseModel)
# VGG-style sparse stack over 2 spatial dimensions, 3 input feature planes.
# 'C' presumably denotes a convolution and 'MP' max pooling — confirm
# against the sparseconvnet documentation.
sparseModel.add(scn.SparseVggNet(2, 3, [
['C', 8, ], ['C', 8], 'MP',
['C', 16], ['C', 16], 'MP',
['C', 16, 8], ['C', 16, 8], 'MP',
['C', 24, 8], ['C', 24, 8], 'MP']))
# Final sparse stages: Convolution(2, 32, 64, 5, 1, False) — presumably
# (dim, nIn, nOut, filter, stride, bias); then BatchNorm+ReLU and
# conversion from the sparse representation to a dense tensor.
sparseModel.add(scn.Convolution(2, 32, 64, 5, 1, False))
sparseModel.add(scn.BatchNormReLU(64))
sparseModel.add(scn.SparseToDense(2))
# Dense head: flatten to 64 features, then classify into 183 classes.
denseModel.add(nn.View(-1, 64))
denseModel.add(nn.Linear(64, 183))
model.type(dtype)
print(model)
# Query the input spatial size needed to reach a 1x1 output (presumably —
# confirm suggestInputSize semantics in the legacy sparseconvnet API).
spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
print('input spatial size', spatial_size)
# NOTE(review): meanings of the 63 / 3 arguments are not visible here —
# check the data.getIterators signature.
dataset = getIterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate(
model, dataset,
{'nEpochs': 100, 'initial_LR': 0.1, 'LR_decay': 0.05, 'weightDecay': 1e-4, 'checkPoint': False})
......@@ -5,11 +5,10 @@
# LICENSE file in the root directory of this source tree.
import torch
import torch.legacy.nn as nn
import sparseconvnet.legacy as scn
import sparseconvnet as scn
# Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'
use_gpu = torch.cuda.is_available()
model = scn.Sequential().add(
scn.SparseVggNet(2, 1,
......@@ -21,11 +20,13 @@ model = scn.Sequential().add(
).add(
scn.BatchNormReLU(32)
).add(
scn.SparseToDense(2)
).type(dtype)
scn.SparseToDense(2,32)
)
if use_gpu:
model.cuda()
# output will be 10x10
inputSpatialSize = model.suggestInputSize(torch.LongTensor([10, 10]))
inputSpatialSize = model.input_spatial_size(torch.LongTensor([10, 10]))
input = scn.InputBatch(2, inputSpatialSize)
msg = [
......@@ -65,10 +66,11 @@ input.setLocations(locations, features, 0)
# 2 if using MP2 pooling for downsizing.
input.precomputeMetadata(3)
model.evaluate()
input.type(dtype)
model.train()
if use_gpu:
input.cuda()
output = model.forward(input)
# Output is 2x32x10x10: our minibatch has 2 samples, the network has 32 output
# feature planes, and 10x10 is the spatial size of the output.
print(output.size(), output.type())
print(output.size(), output.data.type())
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment