Commit 9c865087 authored by Benjamin Thomas Graham

non-legacy PyTorch

parent 81d65180
@@ -6,3 +6,34 @@
forward_pass_multiplyAdd_count = 0
forward_pass_hidden_states = 0
from .averagePooling import AveragePooling
from .batchNormalization import BatchNormalization, BatchNormReLU, BatchNormLeakyReLU
from .classificationTrainValidate import ClassificationTrainValidate
from .convolution import Convolution
from .deconvolution import Deconvolution
from .denseToSparse import DenseToSparse
from .identity import Identity
from .inputBatch import InputBatch
from .maxPooling import MaxPooling
from .metadata import Metadata
from .networkArchitectures import *
from .networkInNetwork import NetworkInNetwork
from .sequential import Sequential
from .sparseConvNetTensor import SparseConvNetTensor
from .sparseToDense import SparseToDense
from .submanifoldConvolution import SubmanifoldConvolution, ValidConvolution
from .tables import *
def concatenate_feature_planes(input):
    output = SparseConvNetTensor()
    output.metadata = input[0].metadata
    output.spatial_size = input[0].spatial_size
    output.features = torch.cat([i.features for i in input], 1)
    return output
def add_feature_planes(input):
    output = SparseConvNetTensor()
    output.metadata = input[0].metadata
    output.spatial_size = input[0].spatial_size
    output.features = sum([i.features for i in input])
    return output
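The two helpers above merge SparseConvNetTensors that share metadata: concatenate_feature_planes stacks feature planes side by side, add_feature_planes sums them elementwise. A minimal usage sketch (a and b are hypothetical tensors produced from the same InputBatch, so their metadata, spatial sizes and active sites coincide):
import sparseconvnet as scn
merged = scn.concatenate_feature_planes([a, b])  # nPlanes(a) + nPlanes(b) feature planes
summed = scn.add_feature_planes([a, b])          # requires matching nPlanes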
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch.autograd import Function, Variable
from torch.nn import Module
from .utils import *
from .sparseConvNetTensor import SparseConvNetTensor
class AveragePoolingFunction(Function):
@staticmethod
def forward(
ctx,
input_features,
input_metadata,
input_spatial_size,
output_spatial_size,
dimension,
pool_size,
pool_stride,
nFeaturesToDrop):
ctx.input_features=input_features
ctx.input_metadata=input_metadata
ctx.input_spatial_size = input_spatial_size
ctx.output_spatial_size = output_spatial_size
ctx.dimension = dimension
ctx.pool_size = pool_size
ctx.pool_stride = pool_stride
ctx.nFeaturesToDrop = nFeaturesToDrop
output_features = input_features.new()
dim_typed_fn(dimension, input_features, 'AveragePooling_updateOutput')(
input_spatial_size,
output_spatial_size,
pool_size,
pool_stride,
input_metadata.ffi,
input_features,
output_features,
nFeaturesToDrop,
torch.cuda.IntTensor() if input_features.is_cuda else nullptr)
return output_features
@staticmethod
def backward(ctx, grad_output):
grad_input=Variable(grad_output.data.new())
dim_typed_fn(
ctx.dimension, ctx.input_features, 'AveragePooling_updateGradInput')(
ctx.input_spatial_size,
ctx.output_spatial_size,
ctx.pool_size,
ctx.pool_stride,
ctx.input_metadata.ffi,
ctx.input_features,
grad_input.data,
grad_output.data.contiguous(),
ctx.nFeaturesToDrop,
torch.cuda.IntTensor() if ctx.input_features.is_cuda else nullptr)
return grad_input, None, None, None, None, None, None, None
class AveragePooling(Module):
def __init__(self, dimension, pool_size, pool_stride, nFeaturesToDrop=0):
super(AveragePooling, self).__init__()
self.dimension = dimension
self.pool_size = toLongTensor(dimension, pool_size)
self.pool_stride = toLongTensor(dimension, pool_stride)
self.nFeaturesToDrop = nFeaturesToDrop
def forward(self, input):
output = SparseConvNetTensor()
output.metadata = input.metadata
output.spatial_size = (
input.spatial_size - self.pool_size) / self.pool_stride + 1
assert ((output.spatial_size-1)*self.pool_stride+self.pool_size==input.spatial_size).all()
output.features = AveragePoolingFunction().apply(
input.features, input.metadata, input.spatial_size,
output.spatial_size, self.dimension,self.pool_size,self.pool_stride,
self.nFeaturesToDrop)
return output
def input_spatial_size(self, out_size):
return (out_size - 1) * self.pool_stride + self.pool_size
def __repr__(self):
s = 'AveragePooling'
if self.pool_size.max() == self.pool_size.min() and\
self.pool_stride.max() == self.pool_stride.min():
s = s + str(self.pool_size[0]) + '/' + str(self.pool_stride[0])
else:
s = s + '(' + str(self.pool_size[0])
for i in self.pool_size[1:]:
s = s + ',' + str(i)
s = s + ')/(' + str(self.pool_stride[0])
for i in self.pool_stride[1:]:
s = s + ',' + str(i)
s = s + ')'
if self.nFeaturesToDrop > 0:
            s = s + ' nFeaturesToDrop = ' + str(self.nFeaturesToDrop)
return s
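AveragePooling requires the pooling windows to tile the input exactly: the assertion in forward enforces input_size == (output_size - 1) * pool_stride + pool_size, which input_spatial_size computes in reverse. A sketch of the arithmetic with plain integers:
# 2x2 pooling with stride 2 on a 32-pixel axis
in_size, pool, stride = 32, 2, 2
out = (in_size - pool) // stride + 1           # 16
assert (out - 1) * stride + pool == in_size    # tiles exactly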
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Parameters:
nPlanes : number of input planes
eps : small number used to stabilise standard deviation calculation
momentum : momentum for the running mean/variance estimates used at test time (default 0.9)
affine : only True is supported at present (default True)
leakiness : apply an activation function in-place: 0 <= leakiness <= 1;
0 for ReLU, values in (0,1) for LeakyReLU, 1 for no activation function
"""
from torch.autograd import Function, Variable
from torch.nn import Module, Parameter
from .utils import *
from .sparseConvNetTensor import SparseConvNetTensor
class BatchNormalizationFunction(Function):
@staticmethod
def forward(
ctx,
input_features,
weight,
bias,
runningMean,
runningVar,
eps,
momentum,
train,
leakiness):
        ctx.nPlanes = runningMean.size(0)
ctx.input_features=input_features
ctx.weight=weight
ctx.bias=bias
ctx.runningMean=runningMean
ctx.runningVar=runningVar
ctx.train=train
ctx.leakiness=leakiness
ctx.output_features = input_features.new()
ctx.saveMean = input_features.new().resize_(ctx.nPlanes)
ctx.saveInvStd = runningMean.clone().resize_(ctx.nPlanes)
typed_fn(input_features, 'BatchNormalization_updateOutput')(
input_features,
ctx.output_features,
ctx.saveMean,
ctx.saveInvStd,
ctx.runningMean,
ctx.runningVar,
ctx.weight if ctx.weight is not None else nullptr,
ctx.bias if ctx.bias is not None else nullptr,
eps,
momentum,
ctx.train,
ctx.leakiness)
return ctx.output_features
@staticmethod
def backward(ctx, grad_output):
assert ctx.train
grad_input=Variable(grad_output.data.new())
if ctx.weight is None:
grad_weight=None
else:
grad_weight=Variable(ctx.input_features.new().resize_(ctx.nPlanes).zero_())
if ctx.bias is None:
grad_bias=None
else:
grad_bias=Variable(ctx.input_features.new().resize_(ctx.nPlanes).zero_())
typed_fn(ctx.input_features, 'BatchNormalization_backward')(
ctx.input_features,
grad_input.data,
ctx.output_features,
grad_output.data.contiguous(),
ctx.saveMean,
ctx.saveInvStd,
ctx.runningMean,
ctx.runningVar,
ctx.weight if ctx.weight is not None else nullptr,
ctx.bias if ctx.bias is not None else nullptr,
grad_weight.data if grad_weight is not None else nullptr,
grad_bias.data if grad_bias is not None else nullptr,
ctx.leakiness)
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
class BatchNormalization(Module):
def __init__(
self,
nPlanes,
eps=1e-4,
momentum=0.9,
affine=True,
leakiness=1):
Module.__init__(self)
self.nPlanes = nPlanes
self.eps = eps
self.momentum = momentum
self.affine = affine
self.leakiness = leakiness
self.register_buffer("runningMean", torch.Tensor(nPlanes).fill_(0))
self.register_buffer("runningVar", torch.Tensor(nPlanes).fill_(1))
if affine:
self.weight = Parameter(torch.Tensor(nPlanes).fill_(1))
self.bias = Parameter(torch.Tensor(nPlanes).fill_(0))
else:
self.weight = None
self.bias = None
def forward(self, input):
assert input.features.ndimension()==0 or input.features.size(1) == self.nPlanes
output = SparseConvNetTensor()
output.metadata = input.metadata
output.spatial_size = input.spatial_size
output.features = BatchNormalizationFunction().apply(
input.features,
self.weight,
self.bias,
self.runningMean,
self.runningVar,
self.eps,
self.momentum,
self.training,
self.leakiness)
return output
def input_spatial_size(self, out_size):
return out_size
def __repr__(self):
s = 'BatchNorm(' + str(self.nPlanes) + ',eps=' + str(self.eps) + \
',momentum=' + str(self.momentum) + ',affine=' + str(self.affine)
if self.leakiness > 0:
s = s + ',leakiness=' + str(self.leakiness)
s = s + ')'
return s
class BatchNormReLU(BatchNormalization):
def __init__(self, nPlanes, eps=1e-4, momentum=0.9):
BatchNormalization.__init__(self, nPlanes, eps, momentum, True, 0)
def __repr__(self):
s = 'BatchNormReLU(' + str(self.nPlanes) + ',eps=' + str(self.eps) + \
',momentum=' + str(self.momentum) + ',affine=' + str(self.affine) + ')'
return s
class BatchNormLeakyReLU(BatchNormalization):
def __init__(self, nPlanes, eps=1e-4, momentum=0.9):
BatchNormalization.__init__(self, nPlanes, eps, momentum, True, 0.333)
def __repr__(self):
        s = 'BatchNormLeakyReLU(' + str(self.nPlanes) + ',eps=' + str(self.eps) + \
',momentum=' + str(self.momentum) + ',affine=' + str(self.affine) + ')'
return s
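The leakiness argument fuses the activation into the batch-normalization kernel: 0 gives ReLU, values in (0,1) give LeakyReLU, and 1 (the default) applies no activation. The two subclasses are just fixed settings of it; a minimal sketch assuming 64 input planes:
import sparseconvnet as scn
bn       = scn.BatchNormalization(64)   # leakiness=1: batch norm only
bn_relu  = scn.BatchNormReLU(64)        # leakiness=0: fused BN + ReLU
bn_lrelu = scn.BatchNormLeakyReLU(64)   # leakiness=0.333: fused BN + LeakyReLU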
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import sparseconvnet as s
import time
import os
import math
def updateStats(stats, output, target, loss):
batchSize = output.size(0)
stats['n'] = stats['n'] + batchSize
stats['nll'] = stats['nll'] + loss * batchSize
_, predictions = output.float().sort(1, True)
correct = predictions.eq(
target.long().view(batchSize, 1).expand_as(output))
# Top-1 score
stats['top1'] += correct.narrow(1, 0, 1).sum()
# Top-5 score
    k = min(5, correct.size(1))
    stats['top5'] += correct.narrow(1, 0, k).sum()
def ClassificationTrainValidate(model, dataset, p):
criterion = nn.CrossEntropyLoss()
if 'n_epochs' not in p:
p['n_epochs'] = 100
if 'initial_lr' not in p:
p['initial_lr'] = 1e-1
if 'lr_decay' not in p:
p['lr_decay'] = 4e-2
if 'weight_decay' not in p:
p['weight_decay'] = 1e-4
if 'momentum' not in p:
p['momentum'] = 0.9
if 'check_point' not in p:
p['check_point'] = False
if 'use_gpu' not in p:
p['use_gpu'] = torch.cuda.is_available()
if p['use_gpu']:
model.cuda()
criterion.cuda()
optimizer = optim.SGD(model.parameters(),
lr=p['initial_lr'],
momentum = p['momentum'],
weight_decay = p['weight_decay'],
nesterov=True)
if p['check_point'] and os.path.isfile('epoch.pth'):
p['epoch'] = torch.load('epoch.pth') + 1
print('Restarting at epoch ' +
str(p['epoch']) +
' from model.pth ..')
model.load_state_dict(torch.load('model.pth'))
else:
p['epoch']=1
print(p)
print('#parameters', sum([x.nelement() for x in model.parameters()]))
for epoch in range(p['epoch'], p['n_epochs'] + 1):
model.train()
stats = {'top1': 0, 'top5': 0, 'n': 0, 'nll': 0}
for param_group in optimizer.param_groups:
param_group['lr'] = p['initial_lr'] * \
math.exp((1 - epoch) * p['lr_decay'])
start = time.time()
for batch in dataset['train']():
if p['use_gpu']:
batch['input']=batch['input'].cuda()
batch['target'] = batch['target'].cuda()
batch['input'].to_variable(requires_grad=True)
batch['target'] = Variable(batch['target'])
optimizer.zero_grad()
output = model(batch['input'])
loss = criterion(output, batch['target'])
updateStats(stats, output.data, batch['target'].data, loss.data[0])
loss.backward()
optimizer.step()
        print(epoch, 'train: top1=%.2f%% top5=%.2f%% nll:%.2f time:%.1fs' %
              (100 * (1 - 1.0 * stats['top1'] / stats['n']),
               100 * (1 - 1.0 * stats['top5'] / stats['n']),
               stats['nll'] / stats['n'],
               time.time() - start))
if p['check_point']:
torch.save(epoch, 'epoch.pth')
torch.save(model.state_dict(),'model.pth')
model.eval()
s.forward_pass_multiplyAdd_count = 0
s.forward_pass_hidden_states = 0
stats = {'top1': 0, 'top5': 0, 'n': 0, 'nll': 0}
start = time.time()
for batch in dataset['val']():
if p['use_gpu']:
batch['input']=batch['input'].cuda()
batch['target'] = batch['target'].cuda()
batch['input'].to_variable()
batch['target'] = Variable(batch['target'])
output = model(batch['input'])
loss = criterion(output, batch['target'])
updateStats(stats, output.data, batch['target'].data, loss.data[0])
        print(epoch, 'test: top1=%.2f%% top5=%.2f%% nll:%.2f time:%.1fs' %
              (100 * (1 - 1.0 * stats['top1'] / stats['n']),
               100 * (1 - 1.0 * stats['top5'] / stats['n']),
               stats['nll'] / stats['n'],
               time.time() - start))
        print('%.3e MultiplyAdds/sample %.3e HiddenStates/sample' %
              (s.forward_pass_multiplyAdd_count / stats['n'],
               s.forward_pass_hidden_states / stats['n']))
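ClassificationTrainValidate drives the whole train/validate cycle from a parameter dict, filling in defaults for any missing keys. A hypothetical invocation (model is any sparseconvnet classifier; dataset['train'] and dataset['val'] are zero-argument callables yielding batches shaped {'input': ..., 'target': ...} as the loops above expect):
p = {'n_epochs': 50,
     'initial_lr': 0.1,       # lr = initial_lr * exp((1 - epoch) * lr_decay)
     'lr_decay': 4e-2,
     'check_point': True}     # resume from epoch.pth / model.pth if present
ClassificationTrainValidate(model, dataset, p)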
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sparseconvnet
from torch.autograd import Function, Variable
from torch.nn import Module, Parameter
from .utils import *
from .sparseConvNetTensor import SparseConvNetTensor
class ConvolutionFunction(Function):
@staticmethod
def forward(
ctx,
input_features,
weight,
bias,
input_metadata,
input_spatial_size,
output_spatial_size,
dimension,
filter_size,
filter_stride):
        output_features = input_features.new()
        ctx.input_features = input_features
        ctx.input_metadata = input_metadata
        ctx.input_spatial_size = input_spatial_size
        ctx.weight = weight
        ctx.bias = bias
ctx.output_spatial_size=output_spatial_size
ctx.dimension=dimension
ctx.filter_size=filter_size
ctx.filter_stride=filter_stride
sparseconvnet.forward_pass_multiplyAdd_count +=\
dim_typed_fn(
dimension, input_features, 'Convolution_updateOutput')(
input_spatial_size,
output_spatial_size,
filter_size,
filter_stride,
input_metadata.ffi,
input_features,
output_features,
weight,
bias if bias is not None else nullptr,
0, #remove this parameter!!
torch.cuda.IntTensor() if input_features.is_cuda else nullptr)
sparseconvnet.forward_pass_hidden_states += output_features.nelement()
return output_features
@staticmethod
def backward(ctx, grad_output):
        grad_input = Variable(grad_output.data.new())
        grad_weight = Variable(grad_output.data.new().resize_as_(ctx.weight).zero_())
        if ctx.bias is None:
            grad_bias = None
        else:
            grad_bias = Variable(grad_output.data.new().resize_as_(ctx.bias).zero_())
dim_typed_fn(
ctx.dimension, ctx.input_features, 'Convolution_backward')(
ctx.input_spatial_size,
ctx.output_spatial_size,
ctx.filter_size,
ctx.filter_stride,
ctx.input_metadata.ffi,
ctx.input_features,
grad_input.data,
grad_output.data.contiguous(),
ctx.weight,
grad_weight.data,
grad_bias.data if grad_bias is not None else nullptr,
0, #remove this parameter
torch.cuda.IntTensor() if ctx.input_features.is_cuda else nullptr)
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
class Convolution(Module):
def __init__(self, dimension, nIn, nOut, filter_size, filter_stride, bias):
Module.__init__(self)
self.dimension = dimension
self.nIn = nIn
self.nOut = nOut
self.filter_size = toLongTensor(dimension, filter_size)
self.filter_volume = self.filter_size.prod()
self.filter_stride = toLongTensor(dimension, filter_stride)
std = (2.0 / nIn / self.filter_volume)**0.5
self.weight = Parameter(torch.Tensor(
self.filter_volume * nIn, nOut).normal_(
0,
std))
if bias:
self.bias = Parameter(torch.Tensor(nOut).zero_())
else:
self.bias=None
def forward(self, input):
assert input.features.ndimension()==0 or input.features.size(1) == self.nIn
output = SparseConvNetTensor()
output.metadata = input.metadata
output.spatial_size =\
(input.spatial_size - self.filter_size) / self.filter_stride + 1
assert ((output.spatial_size-1)*self.filter_stride+self.filter_size==input.spatial_size).all()
output.features=ConvolutionFunction().apply(
input.features,
self.weight,
self.bias,
input.metadata,
input.spatial_size,
output.spatial_size,
self.dimension,
self.filter_size,
self.filter_stride,
)
return output
def __repr__(self):
s = 'Convolution ' + str(self.nIn) + '->' + str(self.nOut) + ' C'
if self.filter_size.max() == self.filter_size.min() and\
self.filter_stride.max() == self.filter_stride.min():
s = s + str(self.filter_size[0]) + '/' + str(self.filter_stride[0])
else:
s = s + '(' + str(self.filter_size[0])
for i in self.filter_size[1:]:
s = s + ',' + str(i)
s = s + ')/(' + str(self.filter_stride[0])
for i in self.filter_stride[1:]:
s = s + ',' + str(i)
s = s + ')'
return s
def input_spatial_size(self, out_size):
return (out_size - 1) * self.filter_stride + self.filter_size
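Convolution keeps its weight as a 2D (filter_volume * nIn) x nOut matrix, He-initialised with std = sqrt(2 / (nIn * filter_volume)), and shrinks the spatial size by the valid-convolution rule asserted in forward. A small sketch, assuming scalar filter_size and filter_stride broadcast across all dimensions:
import sparseconvnet as scn
conv = scn.Convolution(2, nIn=8, nOut=16, filter_size=3, filter_stride=2, bias=False)
# conv.weight has shape (3*3*8, 16); spatially, 15 -> (15 - 3) // 2 + 1 = 7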
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sparseconvnet
from torch.autograd import Function, Variable
from torch.nn import Module, Parameter
from .utils import *
from .sparseConvNetTensor import SparseConvNetTensor
class DeconvolutionFunction(Function):
@staticmethod
def forward(
ctx,
input_features,
weight,
bias,
input_metadata,
input_spatial_size,
output_spatial_size,
dimension,
filter_size,
filter_stride):
ctx.input_features=input_features
ctx.input_metadata=input_metadata
ctx.input_spatial_size=input_spatial_size
ctx.weight=weight
ctx.bias=bias
ctx.output_features=input_features.new()
ctx.output_spatial_size=output_spatial_size
ctx.dimension=dimension
ctx.filter_size=filter_size
ctx.filter_stride=filter_stride
sparseconvnet.forward_pass_multiplyAdd_count +=\
dim_typed_fn(
dimension, input_features, 'Deconvolution_updateOutput')(
input_spatial_size,
output_spatial_size,
filter_size,
filter_stride,
input_metadata.ffi,
input_features,
ctx.output_features,
weight,
bias if bias is not None else nullptr,
0, #remove this parameter!!
torch.cuda.IntTensor() if input_features.is_cuda else nullptr)
sparseconvnet.forward_pass_hidden_states += ctx.output_features.nelement()
return ctx.output_features
@staticmethod
def backward(ctx, grad_output):
grad_input=Variable(grad_output.data.new())
grad_weight=Variable(grad_output.data.new().resize_as_(ctx.weight).zero_())
if ctx.bias is None:
grad_bias=None
else:
grad_bias = Variable(grad_output.data.new().resize_as_(ctx.bias).zero_())
dim_typed_fn(
ctx.dimension, ctx.input_features, 'Deconvolution_backward')(
ctx.input_spatial_size,
ctx.output_spatial_size,
ctx.filter_size,
ctx.filter_stride,
ctx.input_metadata.ffi,
ctx.input_features,
grad_input.data,
grad_output.data.contiguous(),
ctx.weight,
grad_weight.data,
grad_bias.data if grad_bias is not None else nullptr,
0, #remove this parameter
torch.cuda.IntTensor() if ctx.input_features.is_cuda else nullptr)
return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
class Deconvolution(Module):
def __init__(self, dimension, nIn, nOut, filter_size, filter_stride, bias):
Module.__init__(self)
self.dimension = dimension
self.nIn = nIn
self.nOut = nOut
self.filter_size = toLongTensor(dimension, filter_size)
self.filter_volume = self.filter_size.prod()
self.filter_stride = toLongTensor(dimension, filter_stride)
std = (2.0 / nIn / self.filter_volume)**0.5
self.weight = Parameter(torch.Tensor(
self.filter_volume * nIn, nOut).normal_(
0,
std))
if bias:
self.bias = Parameter(torch.Tensor(nOut).zero_())
else:
self.bias=None
def forward(self, input):
assert input.features.ndimension()==0 or input.features.size(1) == self.nIn
output = SparseConvNetTensor()
output.metadata = input.metadata
output.spatial_size =\
(input.spatial_size - 1) * self.filter_stride + self.filter_size
output.features=DeconvolutionFunction().apply(
input.features,
self.weight,
self.bias,
input.metadata,
input.spatial_size,
output.spatial_size,
self.dimension,
self.filter_size,
self.filter_stride,
)
return output
def __repr__(self):
s = 'Deconvolution ' + str(self.nIn) + '->' + str(self.nOut) + ' C'
if self.filter_size.max() == self.filter_size.min() and\
self.filter_stride.max() == self.filter_stride.min():
s = s + str(self.filter_size[0]) + '/' + str(self.filter_stride[0])
else:
s = s + '(' + str(self.filter_size[0])
for i in self.filter_size[1:]:
s = s + ',' + str(i)
s = s + ')/(' + str(self.filter_stride[0])
for i in self.filter_stride[1:]:
s = s + ',' + str(i)
s = s + ')'
return s
def input_spatial_size(self, out_size):
return (out_size - 1) * self.filter_stride + self.filter_size
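Deconvolution applies the inverse size rule, output = (input - 1) * stride + filter_size, with no divisibility assertion needed, so a Deconvolution with the same filter size and stride exactly undoes a matching Convolution's spatial downsampling:
in_size, f, stride = 7, 3, 2
up = (in_size - 1) * stride + f   # 15: restores the size a 3/2 convolution reduced to 7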
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Function to convert a dense input tensor into a sparse input.
If possible, avoid using this module; construct the network input directly with InputBatch.
Parameters:
dimension : of the input field
"""
from torch.autograd import Function, Variable
from torch.nn import Module
from .utils import *
from .metadata import Metadata
from .sparseConvNetTensor import SparseConvNetTensor
class DenseToSparseFunction(Function):
@staticmethod
def forward(
ctx,
input,
output_metadata,
output_spatial_size,
dimension):
        ctx.dimension = dimension
        a = input
        # Move feature planes last: (batch, C, *spatial) -> (batch, *spatial, C)
        aa = a.permute(*([0, ] + list(range(2, 2 + dimension)) + [1, ])).clone()
        ctx.aas = aa.size()
        # A site is active if any of its feature planes is non-zero
        nz = aa.abs().sum(dimension + 1).view(aa.size()[0:-1])
        s = torch.LongTensor(nz.stride()).view(1, dimension + 1)
        nz = nz.nonzero()
        s = s.type_as(nz)
        aa = aa.view(-1, a.size(1))
        ctx.aas2 = aa.size()
        # Flatten each active site's (batch, *spatial) coordinates into a row index
        ctx.r = (nz * s.expand_as(nz)).sum(1).view(-1)
        ctx.output_features = aa.index_select(0, ctx.r)
        dim_fn(dimension, 'createMetadataForDenseToSparse')(
            output_metadata.ffi,
            output_spatial_size,
            nz.cpu(),
            input.size(0))
        return ctx.output_features
    @staticmethod
    def backward(ctx, grad_output):
        # Scatter the sparse gradients back into a dense tensor, then undo the permute
        grad_input = Variable(grad_output.data.new().resize_(ctx.aas2).zero_()
                              .index_copy_(0, ctx.r, grad_output.data))
        grad_input = grad_input.view(ctx.aas).permute(
            *([0, ctx.dimension + 1] + list(range(1, ctx.dimension + 1))))
return grad_input, None, None, None
class DenseToSparse(Module):
def __init__(self, dimension):
Module.__init__(self)
self.dimension = dimension
def forward(self, input):
output = SparseConvNetTensor()
output.metadata = Metadata(self.dimension)
output.spatial_size=torch.LongTensor(list(input.size()[2:]))
output.features=DenseToSparseFunction().apply(
input,
output.metadata,
output.spatial_size,
self.dimension)
return output
def __repr__(self):
return 'DenseToSparse(' + str(self.dimension) + ')'
def input_spatial_size(self, out_size):
return out_size
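DenseToSparse marks a site active when any of its feature planes is non-zero and keeps only those rows. A hypothetical example on a (batch, channels, H, W) input, wrapped in a Variable to match the pre-0.4 autograd style used throughout this commit:
import torch
from torch.autograd import Variable
import sparseconvnet as scn
dense = Variable(torch.zeros(1, 3, 8, 8))
dense[0, :, 2, 5] = 1                  # a single active site
sparse = scn.DenseToSparse(2)(dense)   # SparseConvNetTensor with one feature row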
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch.nn import Module
class Identity(Module):
def forward(self, input):
return input
def input_spatial_size(self, out_size):
return out_size
@@ -6,7 +6,7 @@
import torch
from .metadata import Metadata
from ..utils import toLongTensor, dim_fn
from .utils import toLongTensor, dim_fn
from .sparseConvNetTensor import SparseConvNetTensor
class InputBatch(SparseConvNetTensor):
......
@@ -5,9 +5,9 @@
# LICENSE file in the root directory of this source tree.
from ..utils import *
from .metadata import Metadata
from .inputBatch import InputBatch
from .sparseConvNetTensor import SparseConvNetTensor
from ..metadata import Metadata
from ..inputBatch import InputBatch
from ..sparseConvNetTensor import SparseConvNetTensor
from .sparseModule import SparseModule
from .averagePooling import AveragePooling
from .batchNormalization import BatchNormalization, BatchNormReLU, BatchNormLeakyReLU, BatchNormalizationInTensor
......
@@ -17,7 +17,7 @@ import torch
import sparseconvnet as s
from . import SparseModule
from ..utils import toLongTensor, typed_fn, optionalTensor, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
from ..sparseConvNetTensor import SparseConvNetTensor
import math
......
@@ -8,7 +8,7 @@ import torch
import sparseconvnet
from . import SparseModule
from ..utils import toLongTensor, dim_typed_fn, optionalTensor, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
from ..sparseConvNetTensor import SparseConvNetTensor
class AveragePooling(SparseModule):
......
@@ -19,7 +19,7 @@ import torch
import sparseconvnet
from . import SparseModule
from ..utils import toLongTensor, typed_fn, optionalTensor, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
from ..sparseConvNetTensor import SparseConvNetTensor
class BatchNormalization(SparseModule):
def __init__(
......
@@ -19,7 +19,7 @@ import torch
import sparseconvnet
from . import SparseModule
from ..utils import toLongTensor, typed_fn
from .sparseConvNetTensor import SparseConvNetTensor
from ..sparseConvNetTensor import SparseConvNetTensor
class BatchwiseDropout(SparseModule):
def __init__(
......
@@ -15,7 +15,7 @@ import torch
import sparseconvnet
from . import SparseModule
from ..utils import toLongTensor, dim_typed_fn, optionalTensor, nullptr, set
from .sparseConvNetTensor import SparseConvNetTensor
from ..sparseConvNetTensor import SparseConvNetTensor
class CAddTable(SparseModule):
......
@@ -8,7 +8,7 @@ import torch
import sparseconvnet
from torch.legacy.nn import ConcatTable as C
from ..utils import toLongTensor, dim_typed_fn, optionalTensor, nullptr, set
from .sparseConvNetTensor import SparseConvNetTensor
from ..sparseConvNetTensor import SparseConvNetTensor
class ConcatTable(C):
......
@@ -8,7 +8,7 @@ import torch
from . import SparseModule
import sparseconvnet as s
from ..utils import toLongTensor, dim_typed_fn, optionalTensor, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
from ..sparseConvNetTensor import SparseConvNetTensor
class Convolution(SparseModule):
......
@@ -9,7 +9,7 @@ from torch.legacy.nn import Module
import sparseconvnet as s
from . import SparseModule
from ..utils import toLongTensor, dim_typed_fn, optionalTensor, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
from ..sparseConvNetTensor import SparseConvNetTensor
class Deconvolution(SparseModule):
......
@@ -8,7 +8,7 @@ import torch
import sparseconvnet as s
from torch.legacy.nn import Container
from ..utils import toLongTensor, typed_fn, optionalTensor, nullptr, set
from .sparseConvNetTensor import SparseConvNetTensor
from ..sparseConvNetTensor import SparseConvNetTensor
from .batchNormalization import *
from .affineReLUTrivialConvolution import AffineReLUTrivialConvolution
from .validConvolution import ValidConvolution
......
@@ -15,8 +15,8 @@ dimension : of the input field
import torch
from . import SparseModule
from ..utils import dim_fn, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
from .metadata import Metadata
from ..sparseConvNetTensor import SparseConvNetTensor
from ..metadata import Metadata
class DenseToSparse(SparseModule):
def __init__(self, dimension):
......