Commit 5a42d7a9 authored by Benjamin Thomas Graham's avatar Benjamin Thomas Graham
Browse files

tidy

parent c54569a8
...@@ -40,7 +40,7 @@ def train(spatial_size, Scale, precomputeSize): ...@@ -40,7 +40,7 @@ def train(spatial_size, Scale, precomputeSize):
v = torch.FloatTensor([1, 0, 0]) v = torch.FloatTensor([1, 0, 0])
np_random = np.random.RandomState(tbl['idx']) np_random = np.random.RandomState(tbl['idx'])
for char in tbl['input']: for char in tbl['input']:
inp.addSample() inp.add_sample()
m = torch.eye(2) m = torch.eye(2)
r = np_random.randint(1, 3) r = np_random.randint(1, 3)
alpha = random.uniform(-0.2, 0.2) alpha = random.uniform(-0.2, 0.2)
...@@ -80,7 +80,7 @@ def train(spatial_size, Scale, precomputeSize): ...@@ -80,7 +80,7 @@ def train(spatial_size, Scale, precomputeSize):
# for j in np.arange(0,1,1/l): # for j in np.arange(0,1,1/l):
# p[0]=math.floor(x1*j+x2*(1-j)) # p[0]=math.floor(x1*j+x2*(1-j))
# p[1]=math.floor(y1*j+y2*(1-j)) # p[1]=math.floor(y1*j+y2*(1-j))
# inp.setLocation(p,v,False) # inp.set_location(p,v,False)
############################################################### ###############################################################
inp.precomputeMetadata(precomputeSize) inp.precomputeMetadata(precomputeSize)
return {'input': inp, 'target': torch.LongTensor(tbl['target']) - 1} return {'input': inp, 'target': torch.LongTensor(tbl['target']) - 1}
...@@ -107,7 +107,7 @@ def val(spatial_size, Scale, precomputeSize): ...@@ -107,7 +107,7 @@ def val(spatial_size, Scale, precomputeSize):
p = torch.LongTensor(2) p = torch.LongTensor(2)
v = torch.FloatTensor([1, 0, 0]) v = torch.FloatTensor([1, 0, 0])
for char in tbl['input']: for char in tbl['input']:
inp.addSample() inp.add_sample()
for stroke in char: for stroke in char:
stroke = stroke.float() * (Scale - 0.01) / 255 - 0.5 * (Scale - 0.01) stroke = stroke.float() * (Scale - 0.01) / 255 - 0.5 * (Scale - 0.01)
stroke += center.expand_as(stroke) stroke += center.expand_as(stroke)
...@@ -128,5 +128,5 @@ def val(spatial_size, Scale, precomputeSize): ...@@ -128,5 +128,5 @@ def val(spatial_size, Scale, precomputeSize):
return iter return iter
def getIterators(*args): def get_iterators(*args):
return {'train': train(*args), 'val': val(*args)} return {'train': train(*args), 'val': val(*args)}
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
import torch import torch
import torch.legacy.nn as nn import torch.legacy.nn as nn
import sparseconvnet.legacy as scn import sparseconvnet.legacy as scn
from data import getIterators from data import get_iterators
# Use the GPU if there is one, otherwise CPU # Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor' dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'
...@@ -38,7 +38,7 @@ print(model) ...@@ -38,7 +38,7 @@ print(model)
spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1])) spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
print('input spatial size', spatial_size) print('input spatial size', spatial_size)
dataset = getIterators(spatial_size, 64, 2) dataset = get_iterators(spatial_size, 64, 2)
scn.ClassificationTrainValidate( scn.ClassificationTrainValidate(
model, dataset, model, dataset,
{'nEpochs': 100, 'initial_LR': 0.1, 'LR_decay': 0.05, 'weightDecay': 1e-4}) {'nEpochs': 100, 'initial_LR': 0.1, 'LR_decay': 0.05, 'weightDecay': 1e-4})
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
import torch import torch
import torch.legacy.nn as nn import torch.legacy.nn as nn
import sparseconvnet.legacy as scn import sparseconvnet.legacy as scn
from data import getIterators from data import get_iterators
# Use the GPU if there is one, otherwise CPU # Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor' dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'
...@@ -34,7 +34,7 @@ print(model) ...@@ -34,7 +34,7 @@ print(model)
spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1])) spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1]))
print('input spatial size', spatial_size) print('input spatial size', spatial_size)
dataset = getIterators(spatial_size, 63, 3) dataset = get_iterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate( scn.ClassificationTrainValidate(
model, dataset, model, dataset,
{'nEpochs': 100, 'initial_LR': 0.1, 'LR_decay': 0.05, 'weightDecay': 1e-4}) {'nEpochs': 100, 'initial_LR': 0.1, 'LR_decay': 0.05, 'weightDecay': 1e-4})
...@@ -5,35 +5,40 @@ ...@@ -5,35 +5,40 @@
# LICENSE file in the root directory of this source tree. # LICENSE file in the root directory of this source tree.
import torch import torch
import torch.legacy.nn as nn import torch.nn as nn
import sparseconvnet.legacy as scn import sparseconvnet as scn
from data import getIterators from data import get_iterators
# Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'
# two-dimensional SparseConvNet # two-dimensional SparseConvNet
model = nn.Sequential() class Model(nn.Module):
sparseModel = scn.Sequential() def __init__(self):
denseModel = nn.Sequential() nn.Module.__init__(self)
model.add(sparseModel).add(denseModel) self.sparseModel = scn.SparseVggNet(2, 3, [
sparseModel.add(scn.SparseVggNet(2, 3, [
['C', 16], ['C', 16], 'MP', ['C', 16], ['C', 16], 'MP',
['C', 32], ['C', 32], 'MP', ['C', 32], ['C', 32], 'MP',
['C', 48], ['C', 48], 'MP', ['C', 48], ['C', 48], 'MP',
['C', 64], ['C', 64], 'MP', ['C', 64], ['C', 64], 'MP',
['C', 96], ['C', 96]])) ['C', 96], ['C', 96]]
sparseModel.add(scn.Convolution(2, 96, 128, 3, 2, False)) ).add(scn.Convolution(2, 96, 128, 3, 2, False)
sparseModel.add(scn.BatchNormReLU(128)) ).add(scn.BatchNormReLU(128)
sparseModel.add(scn.SparseToDense(2)) ).add(scn.SparseToDense(2, 128))
denseModel.add(nn.View(-1, 128)) self.linear = nn.Linear(128, 3755)
denseModel.add(nn.Linear(128, 3755))
model.type(dtype) def forward(self, x):
print(model) x = self.sparseModel(x)
x = x.view(-1, 128)
x = self.linear(x)
return x
spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1])) model = Model()
print('input spatial size', spatial_size) spatial_size = model.sparseModel.input_spatial_size(torch.LongTensor([1, 1]))
dataset = getIterators(spatial_size, 63, 3) print('Input spatial size:', spatial_size)
dataset = get_iterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate( scn.ClassificationTrainValidate(
model, dataset, model, dataset,
{'nEpochs': 100, 'initial_LR': 0.1, 'LR_decay': 0.05, 'weightDecay': 1e-4}) {'n_epochs': 100,
'initial_lr': 0.1,
'lr_decay': 0.05,
'weight_decay': 1e-4,
'use_gpu': torch.cuda.is_available(),
'check_point': True, })
...@@ -5,35 +5,40 @@ ...@@ -5,35 +5,40 @@
# LICENSE file in the root directory of this source tree. # LICENSE file in the root directory of this source tree.
import torch import torch
import torch.legacy.nn as nn import torch.nn as nn
import sparseconvnet.legacy as scn import sparseconvnet as scn
from data import getIterators from data import get_iterators
# Use the GPU if there is one, otherwise CPU
dtype = 'torch.cuda.FloatTensor' if torch.cuda.is_available() else 'torch.FloatTensor'
# two-dimensional SparseConvNet # two-dimensional SparseConvNet
model = nn.Sequential() class Model(nn.Module):
sparseModel = scn.Sequential() def __init__(self):
denseModel = nn.Sequential() nn.Module.__init__(self)
model.add(sparseModel).add(denseModel) self.sparseModel = scn.SparseVggNet(2, 3, [
sparseModel.add(scn.SparseVggNet(2, 3, [
['C', 16, 8], ['C', 16, 8], 'MP', ['C', 16, 8], ['C', 16, 8], 'MP',
['C', 32, 8], ['C', 32, 8], 'MP', ['C', 32, 8], ['C', 32, 8], 'MP',
['C', 48, 16], ['C', 48, 16], 'MP', ['C', 48, 16], ['C', 48, 16], 'MP',
['C', 64, 16], ['C', 64, 16], 'MP', ['C', 64, 16], ['C', 64, 16], 'MP',
['C', 96, 16], ['C', 96, 16]])) ['C', 96, 16], ['C', 96, 16]]
sparseModel.add(scn.Convolution(2, 96 + 16, 128, 3, 2, False)) ).add(scn.Convolution(2, 112, 128, 3, 2, False)
sparseModel.add(scn.BatchNormReLU(128)) ).add(scn.BatchNormReLU(128)
sparseModel.add(scn.SparseToDense(2)) ).add(scn.SparseToDense(2, 128))
denseModel.add(nn.View(-1, 128)) self.linear = nn.Linear(128, 3755)
denseModel.add(nn.Linear(128, 3755))
model.type(dtype) def forward(self, x):
print(model) x = self.sparseModel(x)
x = x.view(-1, 128)
x = self.linear(x)
return x
spatial_size = sparseModel.suggestInputSize(torch.LongTensor([1, 1])) model = Model()
print('input spatial size', spatial_size) spatial_size = model.sparseModel.input_spatial_size(torch.LongTensor([1, 1]))
dataset = getIterators(spatial_size, 63, 3) print('Input spatial size:', spatial_size)
dataset = get_iterators(spatial_size, 63, 3)
scn.ClassificationTrainValidate( scn.ClassificationTrainValidate(
model, dataset, model, dataset,
{'nEpochs': 100, 'initial_LR': 0.1, 'LR_decay': 0.05, 'weightDecay': 1e-4}) {'n_epochs': 100,
'initial_lr': 0.1,
'lr_decay': 0.05,
'weight_decay': 1e-4,
'use_gpu': torch.cuda.is_available(),
'check_point': True, })
...@@ -39,7 +39,7 @@ def train(spatial_size, Scale, precomputeSize): ...@@ -39,7 +39,7 @@ def train(spatial_size, Scale, precomputeSize):
p = torch.LongTensor(2) p = torch.LongTensor(2)
v = torch.FloatTensor([1, 0, 0]) v = torch.FloatTensor([1, 0, 0])
for char in tbl['input']: for char in tbl['input']:
inp.addSample() inp.add_sample()
for stroke in char: for stroke in char:
stroke = stroke.float() * (Scale - 0.01) / 255 - 0.5 * (Scale - 0.01) stroke = stroke.float() * (Scale - 0.01) / 255 - 0.5 * (Scale - 0.01)
stroke += center.expand_as(stroke) stroke += center.expand_as(stroke)
...@@ -66,7 +66,7 @@ def train(spatial_size, Scale, precomputeSize): ...@@ -66,7 +66,7 @@ def train(spatial_size, Scale, precomputeSize):
# for j in numpy.arange(0,1,1/l): # for j in numpy.arange(0,1,1/l):
# p[0]=math.floor(x1*j+x2*(1-j)) # p[0]=math.floor(x1*j+x2*(1-j))
# p[1]=math.floor(y1*j+y2*(1-j)) # p[1]=math.floor(y1*j+y2*(1-j))
# inp.setLocation(p,v,False) # inp.set_location(p,v,False)
############################################################### ###############################################################
inp.precomputeMetadata(precomputeSize) inp.precomputeMetadata(precomputeSize)
return {'input': inp, 'target': torch.LongTensor(tbl['target'])} return {'input': inp, 'target': torch.LongTensor(tbl['target'])}
...@@ -93,7 +93,7 @@ def val(spatial_size, Scale, precomputeSize): ...@@ -93,7 +93,7 @@ def val(spatial_size, Scale, precomputeSize):
p = torch.LongTensor(2) p = torch.LongTensor(2)
v = torch.FloatTensor([1, 0, 0]) v = torch.FloatTensor([1, 0, 0])
for char in tbl['input']: for char in tbl['input']:
inp.addSample() inp.add_sample()
for stroke in char: for stroke in char:
stroke = stroke.float() * (Scale - 0.01) / 255 - 0.5 * (Scale - 0.01) stroke = stroke.float() * (Scale - 0.01) / 255 - 0.5 * (Scale - 0.01)
stroke += center.expand_as(stroke) stroke += center.expand_as(stroke)
...@@ -114,5 +114,5 @@ def val(spatial_size, Scale, precomputeSize): ...@@ -114,5 +114,5 @@ def val(spatial_size, Scale, precomputeSize):
return iter return iter
def getIterators(*args): def get_iterators(*args):
return {'train': train(*args), 'val': val(*args)} return {'train': train(*args), 'val': val(*args)}
...@@ -36,17 +36,17 @@ msg = [ ...@@ -36,17 +36,17 @@ msg = [
" X X X X X X X X X X X X X X X X X X ", " X X X X X X X X X X X X X X X X X X ",
" X X XXX XXX XXX XX X X XX X X XXX XXX "] " X X XXX XXX XXX XX X X XX X X XXX XXX "]
# Add a sample using setLocation # Add a sample using set_location
input.addSample() input.add_sample()
for y, line in enumerate(msg): for y, line in enumerate(msg):
for x, c in enumerate(line): for x, c in enumerate(line):
if c == 'X': if c == 'X':
location = torch.LongTensor([y, x]) location = torch.LongTensor([y, x])
featureVector = torch.FloatTensor([1]) featureVector = torch.FloatTensor([1])
input.setLocation(location, featureVector, 0) input.set_location(location, featureVector, 0)
# Add a sample using setLocations # Add a sample using set_locations
input.addSample() input.add_sample()
locations = [] locations = []
features = [] features = []
for y, line in enumerate(msg): for y, line in enumerate(msg):
...@@ -56,7 +56,7 @@ for y, line in enumerate(msg): ...@@ -56,7 +56,7 @@ for y, line in enumerate(msg):
features.append([1]) features.append([1])
locations = torch.LongTensor(locations) locations = torch.LongTensor(locations)
features = torch.FloatTensor(features) features = torch.FloatTensor(features)
input.setLocations(locations, features, 0) input.set_locations(locations, features, 0)
# Optional: allow metadata preprocessing to be done in batch preparation threads # Optional: allow metadata preprocessing to be done in batch preparation threads
# to improve GPU utilization. # to improve GPU utilization.
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment