Commit 159e5f9a authored by Benjamin Thomas Graham's avatar Benjamin Thomas Graham
Browse files

example

parent aa071d62
...@@ -32,6 +32,7 @@ model = scn.Sequential().add( ...@@ -32,6 +32,7 @@ model = scn.Sequential().add(
# output will be 10x10 # output will be 10x10
inputSpatialSize = model.input_spatial_size(torch.LongTensor([10, 10])) inputSpatialSize = model.input_spatial_size(torch.LongTensor([10, 10]))
input_layer = scn.InputLayer(2, inputSpatialSize) input_layer = scn.InputLayer(2, inputSpatialSize)
bl_input_layer = scn.BLInputLayer(2, inputSpatialSize)
msgs = [[" X X XXX X X XX X X XX XXX X XXX ", msgs = [[" X X XXX X X XX X X XX XXX X XXX ",
" X X X X X X X X X X X X X X X X ", " X X X X X X X X X X X X X X X X ",
...@@ -46,7 +47,7 @@ msgs = [[" X X XXX X X XX X X XX XXX X XXX ", ...@@ -46,7 +47,7 @@ msgs = [[" X X XXX X X XX X X XX XXX X XXX ",
" X X XXXX x x x x xxxx x ",]] " X X XXXX x x x x xxxx x ",]]
# Create Nx3 and Nx1 vectors to encode the messages above: # Create Nx3 and Nx1 vectors to encode the messages above using InputLayer:
locations = [] locations = []
features = [] features = []
for batchIdx, msg in enumerate(msgs): for batchIdx, msg in enumerate(msgs):
...@@ -65,3 +66,30 @@ output = model(input) ...@@ -65,3 +66,30 @@ output = model(input)
# Output is 2x32x10x10: our minibatch has 2 samples, the network has 32 output # Output is 2x32x10x10: our minibatch has 2 samples, the network has 32 output
# feature planes, and 10x10 is the spatial size of the output. # feature planes, and 10x10 is the spatial size of the output.
print('Output SparseConvNetTensor:', output) print('Output SparseConvNetTensor:', output)
# Alternatively:
# Create Nx3 and Nx1 vectors to encode the messages above using BLInputLayer.
# BLInputLayer takes a batch as a list of [locations, features] pairs, one
# pair per sample, instead of the single flat (N, dim+1) tensor InputLayer uses.
batch = []
for batchIdx, msg in enumerate(msgs):
    # Per-sample coordinate list (l) and feature list (f).
    l, f = [], []
    for y, line in enumerate(msg):
        for x, c in enumerate(line):
            if c == 'X':
                l.append([y, x])  # Locations: (row, column) of each active site
                f.append([1])     # Features: one scalar feature per active site
    batch.append([torch.LongTensor(l), torch.FloatTensor(f)])
# Pack the per-sample [locations, features] pairs into BLInputLayer's
# expected batched format.
batch = scn.prepare_BLInput(batch)
# Only the feature tensor (batch[1]) is moved to the compute device; the
# coordinate tensor stays on the CPU for hash-table construction.
batch[1] = batch[1].to(device)
input = bl_input_layer(batch)
print('Input SparseConvNetTensor:', input)
output = model(input)
# Output is 2x32x10x10: our minibatch has 2 samples, the network has 32 output
# feature planes, and 10x10 is the spatial size of the output.
print('Output SparseConvNetTensor:', output)
...@@ -53,7 +53,7 @@ double AffineReluTrivialConvolution_updateOutput(at::Tensor &input_features, ...@@ -53,7 +53,7 @@ double AffineReluTrivialConvolution_updateOutput(at::Tensor &input_features,
at::Tensor &affineWeight, at::Tensor &affineWeight,
at::Tensor &affineBias, at::Tensor &affineBias,
at::Tensor &convWeight) { at::Tensor &convWeight) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
return cuda_AffineReluTrivialConvolution_updateOutput<float>( return cuda_AffineReluTrivialConvolution_updateOutput<float>(
input_features, output_features, affineWeight, affineBias, convWeight); input_features, output_features, affineWeight, affineBias, convWeight);
else else
...@@ -67,7 +67,7 @@ void AffineReluTrivialConvolution_backward( ...@@ -67,7 +67,7 @@ void AffineReluTrivialConvolution_backward(
at::Tensor &d_affineWeight, at::Tensor &affineBias, at::Tensor &d_affineWeight, at::Tensor &affineBias,
at::Tensor &d_affineBias, at::Tensor &convWeight, at::Tensor &d_convWeight, at::Tensor &d_affineBias, at::Tensor &convWeight, at::Tensor &d_convWeight,
bool additiveGrad) { bool additiveGrad) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_AffineReluTrivialConvolution_backward<float>( cuda_AffineReluTrivialConvolution_backward<float>(
input_features, d_input_features, d_output_features, affineWeight, input_features, d_input_features, d_output_features, affineWeight,
d_affineWeight, affineBias, d_affineBias, convWeight, d_convWeight, d_affineWeight, affineBias, d_affineBias, convWeight, d_convWeight,
...@@ -84,7 +84,7 @@ void BatchNormalization_updateOutput( ...@@ -84,7 +84,7 @@ void BatchNormalization_updateOutput(
at::Tensor &saveMean, at::Tensor &saveInvStd, at::Tensor &runningMean, at::Tensor &saveMean, at::Tensor &saveInvStd, at::Tensor &runningMean,
at::Tensor &runningVar, at::Tensor &weight, at::Tensor &bias, double eps, at::Tensor &runningVar, at::Tensor &weight, at::Tensor &bias, double eps,
double momentum, bool train, double leakiness) { double momentum, bool train, double leakiness) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
cuda_BatchNormalization_updateOutput<float>( cuda_BatchNormalization_updateOutput<float>(
input_features, output_features, saveMean, saveInvStd, runningMean, input_features, output_features, saveMean, saveInvStd, runningMean,
runningVar, weight, bias, eps, momentum, train, leakiness); runningVar, weight, bias, eps, momentum, train, leakiness);
...@@ -100,7 +100,7 @@ void BatchNormalization_backward( ...@@ -100,7 +100,7 @@ void BatchNormalization_backward(
at::Tensor &saveMean, at::Tensor &saveInvStd, at::Tensor &runningMean, at::Tensor &saveMean, at::Tensor &saveInvStd, at::Tensor &runningMean,
at::Tensor &runningVar, at::Tensor &weight, at::Tensor &bias, at::Tensor &runningVar, at::Tensor &weight, at::Tensor &bias,
at::Tensor &d_weight, at::Tensor &d_bias, double leakiness) { at::Tensor &d_weight, at::Tensor &d_bias, double leakiness) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_BatchNormalization_backward<float>( cuda_BatchNormalization_backward<float>(
input_features, d_input_features, output_features, d_output_features, input_features, d_input_features, output_features, d_output_features,
saveMean, saveInvStd, runningMean, runningVar, weight, bias, d_weight, saveMean, saveInvStd, runningMean, runningVar, weight, bias, d_weight,
...@@ -116,7 +116,7 @@ void BatchwiseMultiplicativeDropout_updateOutput(at::Tensor &input_features, ...@@ -116,7 +116,7 @@ void BatchwiseMultiplicativeDropout_updateOutput(at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &output_features,
at::Tensor &noise, at::Tensor &noise,
double alpha) { double alpha) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
cuda_BatchwiseMultiplicativeDropout_updateOutput<float>( cuda_BatchwiseMultiplicativeDropout_updateOutput<float>(
input_features, output_features, noise, alpha); input_features, output_features, noise, alpha);
else else
...@@ -127,7 +127,7 @@ void BatchwiseMultiplicativeDropout_updateOutput(at::Tensor &input_features, ...@@ -127,7 +127,7 @@ void BatchwiseMultiplicativeDropout_updateOutput(at::Tensor &input_features,
void BatchwiseMultiplicativeDropout_updateGradInput( void BatchwiseMultiplicativeDropout_updateGradInput(
at::Tensor &input_features, at::Tensor &d_input_features, at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &noise, double alpha) { at::Tensor &d_output_features, at::Tensor &noise, double alpha) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_BatchwiseMultiplicativeDropout_updateGradInput<float>( cuda_BatchwiseMultiplicativeDropout_updateGradInput<float>(
input_features, d_input_features, d_output_features, noise, alpha); input_features, d_input_features, d_output_features, noise, alpha);
else else
...@@ -137,7 +137,7 @@ void BatchwiseMultiplicativeDropout_updateGradInput( ...@@ -137,7 +137,7 @@ void BatchwiseMultiplicativeDropout_updateGradInput(
void LeakyReLU_updateOutput(at::Tensor &input_features, void LeakyReLU_updateOutput(at::Tensor &input_features,
at::Tensor &output_features, double alpha) { at::Tensor &output_features, double alpha) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
cuda_LeakyReLU_updateOutput<float>(input_features, output_features, alpha); cuda_LeakyReLU_updateOutput<float>(input_features, output_features, alpha);
else else
cpu_LeakyReLU_updateOutput<float>(input_features, output_features, alpha); cpu_LeakyReLU_updateOutput<float>(input_features, output_features, alpha);
...@@ -146,7 +146,7 @@ void LeakyReLU_updateOutput(at::Tensor &input_features, ...@@ -146,7 +146,7 @@ void LeakyReLU_updateOutput(at::Tensor &input_features,
void LeakyReLU_updateGradInput(at::Tensor &input_features, void LeakyReLU_updateGradInput(at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, double alpha) { at::Tensor &d_output_features, double alpha) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_LeakyReLU_updateGradInput<float>(input_features, d_input_features, cuda_LeakyReLU_updateGradInput<float>(input_features, d_input_features,
d_output_features, alpha); d_output_features, alpha);
else else
...@@ -157,7 +157,7 @@ void LeakyReLU_updateGradInput(at::Tensor &input_features, ...@@ -157,7 +157,7 @@ void LeakyReLU_updateGradInput(at::Tensor &input_features,
double NetworkInNetwork_updateOutput(at::Tensor &input_features, double NetworkInNetwork_updateOutput(at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &output_features,
at::Tensor &weight, at::Tensor &bias) { at::Tensor &weight, at::Tensor &bias) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
return cuda_NetworkInNetwork_updateOutput<float>( return cuda_NetworkInNetwork_updateOutput<float>(
input_features, output_features, weight, bias); input_features, output_features, weight, bias);
else else
...@@ -168,7 +168,7 @@ double NetworkInNetwork_updateOutput(at::Tensor &input_features, ...@@ -168,7 +168,7 @@ double NetworkInNetwork_updateOutput(at::Tensor &input_features,
void NetworkInNetwork_updateGradInput(at::Tensor &d_input_features, void NetworkInNetwork_updateGradInput(at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &d_output_features,
at::Tensor &weight) { at::Tensor &weight) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_NetworkInNetwork_updateGradInput<float>(d_input_features, cuda_NetworkInNetwork_updateGradInput<float>(d_input_features,
d_output_features, weight); d_output_features, weight);
else else
...@@ -180,7 +180,7 @@ void NetworkInNetwork_accGradParameters(at::Tensor &input_features, ...@@ -180,7 +180,7 @@ void NetworkInNetwork_accGradParameters(at::Tensor &input_features,
at::Tensor &d_output_features, at::Tensor &d_output_features,
at::Tensor &d_weight, at::Tensor &d_weight,
at::Tensor &d_bias) { at::Tensor &d_bias) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_NetworkInNetwork_accGradParameters<float>( cuda_NetworkInNetwork_accGradParameters<float>(
input_features, d_output_features, d_weight, d_bias); input_features, d_output_features, d_weight, d_bias);
else else
...@@ -191,7 +191,7 @@ template <Int Dimension> ...@@ -191,7 +191,7 @@ template <Int Dimension>
void ActivePooling_updateOutput(at::Tensor &inputSize, Metadata<Dimension> &m, void ActivePooling_updateOutput(at::Tensor &inputSize, Metadata<Dimension> &m,
at::Tensor &input_features, at::Tensor &input_features,
at::Tensor &output_features, bool average) { at::Tensor &output_features, bool average) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
cuda_ActivePooling_updateOutput<float, Dimension>( cuda_ActivePooling_updateOutput<float, Dimension>(
inputSize, m, input_features, output_features, average); inputSize, m, input_features, output_features, average);
else else
...@@ -203,7 +203,7 @@ template <Int Dimension> ...@@ -203,7 +203,7 @@ template <Int Dimension>
void ActivePooling_updateGradInput( void ActivePooling_updateGradInput(
at::Tensor &inputSize, Metadata<Dimension> &m, at::Tensor &input_features, at::Tensor &inputSize, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &d_output_features, bool average) { at::Tensor &d_input_features, at::Tensor &d_output_features, bool average) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
return cuda_ActivePooling_updateGradInput<float, Dimension>( return cuda_ActivePooling_updateGradInput<float, Dimension>(
inputSize, m, input_features, d_input_features, d_output_features, inputSize, m, input_features, d_input_features, d_output_features,
average); average);
...@@ -219,7 +219,7 @@ void AveragePooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize, ...@@ -219,7 +219,7 @@ void AveragePooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &input_features, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &output_features,
long nFeaturesToDrop) { long nFeaturesToDrop) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
cuda_AveragePooling_updateOutput<float, Dimension>( cuda_AveragePooling_updateOutput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features, inputSize, outputSize, poolSize, poolStride, m, input_features,
output_features, nFeaturesToDrop); output_features, nFeaturesToDrop);
...@@ -234,7 +234,7 @@ void AveragePooling_updateGradInput( ...@@ -234,7 +234,7 @@ void AveragePooling_updateGradInput(
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features, at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &d_output_features, at::Tensor &d_input_features, at::Tensor &d_output_features,
long nFeaturesToDrop) { long nFeaturesToDrop) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_AveragePooling_updateGradInput<float, Dimension>( cuda_AveragePooling_updateGradInput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features, inputSize, outputSize, poolSize, poolStride, m, input_features,
d_input_features, d_output_features, nFeaturesToDrop); d_input_features, d_output_features, nFeaturesToDrop);
...@@ -250,7 +250,7 @@ Convolution_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize, ...@@ -250,7 +250,7 @@ Convolution_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
Metadata<Dimension> &m, at::Tensor &input_features, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &output_features, at::Tensor &weight,
at::Tensor &bias) { at::Tensor &bias) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
return cuda_Convolution_updateOutput<float, Dimension>( return cuda_Convolution_updateOutput<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features, inputSize, outputSize, filterSize, filterStride, m, input_features,
output_features, weight, bias); output_features, weight, bias);
...@@ -266,7 +266,7 @@ void Convolution_backward(at::Tensor &inputSize, at::Tensor &outputSize, ...@@ -266,7 +266,7 @@ void Convolution_backward(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &d_input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_output_features, at::Tensor &weight,
at::Tensor &d_weight, at::Tensor &d_bias) { at::Tensor &d_weight, at::Tensor &d_bias) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_Convolution_backward<float, Dimension>( cuda_Convolution_backward<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features, inputSize, outputSize, filterSize, filterStride, m, input_features,
d_input_features, d_output_features, weight, d_weight, d_bias); d_input_features, d_output_features, weight, d_weight, d_bias);
...@@ -280,7 +280,7 @@ double SubmanifoldConvolution_updateOutput( ...@@ -280,7 +280,7 @@ double SubmanifoldConvolution_updateOutput(
at::Tensor &inputSize, at::Tensor &filterSize, Metadata<Dimension> &m, at::Tensor &inputSize, at::Tensor &filterSize, Metadata<Dimension> &m,
at::Tensor &input_features, at::Tensor &output_features, at::Tensor &weight, at::Tensor &input_features, at::Tensor &output_features, at::Tensor &weight,
at::Tensor &bias) { at::Tensor &bias) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
return cuda_SubmanifoldConvolution_updateOutput<float, Dimension>( return cuda_SubmanifoldConvolution_updateOutput<float, Dimension>(
inputSize, filterSize, m, input_features, output_features, weight, inputSize, filterSize, m, input_features, output_features, weight,
bias); bias);
...@@ -295,7 +295,7 @@ void SubmanifoldConvolution_backward( ...@@ -295,7 +295,7 @@ void SubmanifoldConvolution_backward(
at::Tensor &input_features, at::Tensor &d_input_features, at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_weight, at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_weight,
at::Tensor &d_bias) { at::Tensor &d_bias) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_SubmanifoldConvolution_backward<float, Dimension>( cuda_SubmanifoldConvolution_backward<float, Dimension>(
inputSize, filterSize, m, input_features, d_input_features, inputSize, filterSize, m, input_features, d_input_features,
d_output_features, weight, d_weight, d_bias); d_output_features, weight, d_weight, d_bias);
...@@ -308,7 +308,7 @@ template <Int Dimension> ...@@ -308,7 +308,7 @@ template <Int Dimension>
double PermutohedralSubmanifoldConvolution_updateOutput( double PermutohedralSubmanifoldConvolution_updateOutput(
at::Tensor &inputSize, Metadata<Dimension> &m, at::Tensor &input_features, at::Tensor &inputSize, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias) { at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
return cuda_PermutohedralSubmanifoldConvolution_updateOutput<float, return cuda_PermutohedralSubmanifoldConvolution_updateOutput<float,
Dimension>( Dimension>(
inputSize, m, input_features, output_features, weight, bias); inputSize, m, input_features, output_features, weight, bias);
...@@ -322,7 +322,7 @@ void PermutohedralSubmanifoldConvolution_backward( ...@@ -322,7 +322,7 @@ void PermutohedralSubmanifoldConvolution_backward(
at::Tensor &inputSize, Metadata<Dimension> &m, at::Tensor &input_features, at::Tensor &inputSize, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &d_output_features, at::Tensor &d_input_features, at::Tensor &d_output_features,
at::Tensor &weight, at::Tensor &d_weight, at::Tensor &d_bias) { at::Tensor &weight, at::Tensor &d_weight, at::Tensor &d_bias) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_PermutohedralSubmanifoldConvolution_backward<float, Dimension>( cuda_PermutohedralSubmanifoldConvolution_backward<float, Dimension>(
inputSize, m, input_features, d_input_features, d_output_features, inputSize, m, input_features, d_input_features, d_output_features,
weight, d_weight, d_bias); weight, d_weight, d_bias);
...@@ -337,7 +337,7 @@ double FullConvolution_updateOutput( ...@@ -337,7 +337,7 @@ double FullConvolution_updateOutput(
at::Tensor &filterStride, Metadata<Dimension> &mIn, at::Tensor &filterStride, Metadata<Dimension> &mIn,
Metadata<Dimension> &mOut, at::Tensor &input_features, Metadata<Dimension> &mOut, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias) { at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
return cuda_FullConvolution_updateOutput<float, Dimension>( return cuda_FullConvolution_updateOutput<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, mIn, mOut, inputSize, outputSize, filterSize, filterStride, mIn, mOut,
input_features, output_features, weight, bias); input_features, output_features, weight, bias);
...@@ -355,7 +355,7 @@ void FullConvolution_backward(at::Tensor &inputSize, at::Tensor &outputSize, ...@@ -355,7 +355,7 @@ void FullConvolution_backward(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &d_input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_output_features, at::Tensor &weight,
at::Tensor &d_weight, at::Tensor &d_bias) { at::Tensor &d_weight, at::Tensor &d_bias) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_FullConvolution_backward<float, Dimension>( cuda_FullConvolution_backward<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, mIn, mOut, inputSize, outputSize, filterSize, filterStride, mIn, mOut,
input_features, d_input_features, d_output_features, weight, d_weight, input_features, d_input_features, d_output_features, weight, d_weight,
...@@ -372,7 +372,7 @@ double RandomizedStrideConvolution_updateOutput( ...@@ -372,7 +372,7 @@ double RandomizedStrideConvolution_updateOutput(
at::Tensor &filterStride, Metadata<Dimension> &m, at::Tensor &filterStride, Metadata<Dimension> &m,
at::Tensor &input_features, at::Tensor &output_features, at::Tensor &weight, at::Tensor &input_features, at::Tensor &output_features, at::Tensor &weight,
at::Tensor &bias) { at::Tensor &bias) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
return cuda_RandomizedStrideConvolution_updateOutput<float, Dimension>( return cuda_RandomizedStrideConvolution_updateOutput<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features, inputSize, outputSize, filterSize, filterStride, m, input_features,
output_features, weight, bias); output_features, weight, bias);
...@@ -388,7 +388,7 @@ void RandomizedStrideConvolution_backward( ...@@ -388,7 +388,7 @@ void RandomizedStrideConvolution_backward(
at::Tensor &input_features, at::Tensor &d_input_features, at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_weight, at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_weight,
at::Tensor &d_bias) { at::Tensor &d_bias) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_RandomizedStrideConvolution_backward<float, Dimension>( cuda_RandomizedStrideConvolution_backward<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features, inputSize, outputSize, filterSize, filterStride, m, input_features,
d_input_features, d_output_features, weight, d_weight, d_bias); d_input_features, d_output_features, weight, d_weight, d_bias);
...@@ -404,7 +404,7 @@ Deconvolution_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize, ...@@ -404,7 +404,7 @@ Deconvolution_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
Metadata<Dimension> &m, at::Tensor &input_features, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &output_features, at::Tensor &weight,
at::Tensor &bias) { at::Tensor &bias) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
return cuda_Deconvolution_updateOutput<float, Dimension>( return cuda_Deconvolution_updateOutput<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features, inputSize, outputSize, filterSize, filterStride, m, input_features,
output_features, weight, bias); output_features, weight, bias);
...@@ -420,7 +420,7 @@ void Deconvolution_backward(at::Tensor &inputSize, at::Tensor &outputSize, ...@@ -420,7 +420,7 @@ void Deconvolution_backward(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &d_input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_output_features, at::Tensor &weight,
at::Tensor &d_weight, at::Tensor &d_bias) { at::Tensor &d_weight, at::Tensor &d_bias) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_Deconvolution_backward<float, Dimension>( cuda_Deconvolution_backward<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features, inputSize, outputSize, filterSize, filterStride, m, input_features,
d_input_features, d_output_features, weight, d_weight, d_bias); d_input_features, d_output_features, weight, d_weight, d_bias);
...@@ -435,7 +435,7 @@ void InputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor &spatialSize, ...@@ -435,7 +435,7 @@ void InputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor &spatialSize,
at::Tensor &input_features, at::Tensor &input_features,
at::Tensor &output_features, long batchSize, at::Tensor &output_features, long batchSize,
long mode) { long mode) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
cuda_InputLayer_updateOutput<float, Dimension>( cuda_InputLayer_updateOutput<float, Dimension>(
m, spatialSize, input_coords, input_features, output_features, m, spatialSize, input_coords, input_features, output_features,
batchSize, mode); batchSize, mode);
...@@ -448,7 +448,7 @@ template <Int Dimension> ...@@ -448,7 +448,7 @@ template <Int Dimension>
void InputLayer_updateGradInput(Metadata<Dimension> &m, void InputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor &d_input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features) { at::Tensor &d_output_features) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_InputLayer_updateGradInput<float, Dimension>(m, d_input_features, cuda_InputLayer_updateGradInput<float, Dimension>(m, d_input_features,
d_output_features); d_output_features);
else else
...@@ -459,7 +459,7 @@ template <Int Dimension> ...@@ -459,7 +459,7 @@ template <Int Dimension>
void OutputLayer_updateOutput(Metadata<Dimension> &m, void OutputLayer_updateOutput(Metadata<Dimension> &m,
at::Tensor &input_features, at::Tensor &input_features,
at::Tensor &output_features) { at::Tensor &output_features) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
cuda_OutputLayer_updateOutput<float, Dimension>(m, input_features, cuda_OutputLayer_updateOutput<float, Dimension>(m, input_features,
output_features); output_features);
else else
...@@ -470,7 +470,7 @@ template <Int Dimension> ...@@ -470,7 +470,7 @@ template <Int Dimension>
void OutputLayer_updateGradInput(Metadata<Dimension> &m, void OutputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor &d_input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features) { at::Tensor &d_output_features) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_OutputLayer_updateGradInput<float, Dimension>(m, d_input_features, cuda_OutputLayer_updateGradInput<float, Dimension>(m, d_input_features,
d_output_features); d_output_features);
else else
...@@ -482,7 +482,7 @@ void BLInputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor &spatialSize, ...@@ -482,7 +482,7 @@ void BLInputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor &spatialSize,
at::Tensor &input_coords, at::Tensor &input_coords,
at::Tensor &input_features, at::Tensor &input_features,
at::Tensor &output_features, long mode) { at::Tensor &output_features, long mode) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
cuda_BLInputLayer_updateOutput<float, Dimension>( cuda_BLInputLayer_updateOutput<float, Dimension>(
m, spatialSize, input_coords, input_features, output_features, mode); m, spatialSize, input_coords, input_features, output_features, mode);
else else
...@@ -493,7 +493,7 @@ template <Int Dimension> ...@@ -493,7 +493,7 @@ template <Int Dimension>
void BLInputLayer_updateGradInput(Metadata<Dimension> &m, void BLInputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor &d_input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features) { at::Tensor &d_output_features) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_BLInputLayer_updateGradInput<float, Dimension>(m, d_input_features, cuda_BLInputLayer_updateGradInput<float, Dimension>(m, d_input_features,
d_output_features); d_output_features);
else else
...@@ -504,7 +504,7 @@ template <Int Dimension> ...@@ -504,7 +504,7 @@ template <Int Dimension>
void BLOutputLayer_updateOutput(Metadata<Dimension> &m, void BLOutputLayer_updateOutput(Metadata<Dimension> &m,
at::Tensor &input_features, at::Tensor &input_features,
at::Tensor &output_features) { at::Tensor &output_features) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
cuda_BLOutputLayer_updateOutput<float, Dimension>(m, input_features, cuda_BLOutputLayer_updateOutput<float, Dimension>(m, input_features,
output_features); output_features);
else else
...@@ -515,7 +515,7 @@ template <Int Dimension> ...@@ -515,7 +515,7 @@ template <Int Dimension>
void BLOutputLayer_updateGradInput(Metadata<Dimension> &m, void BLOutputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor &d_input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features) { at::Tensor &d_output_features) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_BLOutputLayer_updateGradInput<float, Dimension>(m, d_input_features, cuda_BLOutputLayer_updateGradInput<float, Dimension>(m, d_input_features,
d_output_features); d_output_features);
else else
...@@ -528,7 +528,7 @@ void MaxPooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize, ...@@ -528,7 +528,7 @@ void MaxPooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
Metadata<Dimension> &m, at::Tensor &input_features, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &output_features,
long nFeaturesToDrop) { long nFeaturesToDrop) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
cuda_MaxPooling_updateOutput<float, Dimension>( cuda_MaxPooling_updateOutput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features, inputSize, outputSize, poolSize, poolStride, m, input_features,
output_features, nFeaturesToDrop); output_features, nFeaturesToDrop);
...@@ -543,7 +543,7 @@ void MaxPooling_updateGradInput( ...@@ -543,7 +543,7 @@ void MaxPooling_updateGradInput(
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features, at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &output_features, at::Tensor &d_input_features, at::Tensor &output_features,
at::Tensor &d_output_features, long nFeaturesToDrop) { at::Tensor &d_output_features, long nFeaturesToDrop) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_MaxPooling_updateGradInput<float, Dimension>( cuda_MaxPooling_updateGradInput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features, inputSize, outputSize, poolSize, poolStride, m, input_features,
d_input_features, output_features, d_output_features, nFeaturesToDrop); d_input_features, output_features, d_output_features, nFeaturesToDrop);
...@@ -557,7 +557,7 @@ void RandomizedStrideMaxPooling_updateOutput( ...@@ -557,7 +557,7 @@ void RandomizedStrideMaxPooling_updateOutput(
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize, at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features, at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, long nFeaturesToDrop) { at::Tensor &output_features, long nFeaturesToDrop) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
cuda_RandomizedStrideMaxPooling_updateOutput<float, Dimension>( cuda_RandomizedStrideMaxPooling_updateOutput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features, inputSize, outputSize, poolSize, poolStride, m, input_features,
output_features, nFeaturesToDrop); output_features, nFeaturesToDrop);
...@@ -572,7 +572,7 @@ void RandomizedStrideMaxPooling_updateGradInput( ...@@ -572,7 +572,7 @@ void RandomizedStrideMaxPooling_updateGradInput(
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features, at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &output_features, at::Tensor &d_input_features, at::Tensor &output_features,
at::Tensor &d_output_features, long nFeaturesToDrop) { at::Tensor &d_output_features, long nFeaturesToDrop) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_RandomizedStrideMaxPooling_updateGradInput<float, Dimension>( cuda_RandomizedStrideMaxPooling_updateGradInput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features, inputSize, outputSize, poolSize, poolStride, m, input_features,
d_input_features, output_features, d_output_features, nFeaturesToDrop); d_input_features, output_features, d_output_features, nFeaturesToDrop);
...@@ -585,7 +585,7 @@ template <Int Dimension> ...@@ -585,7 +585,7 @@ template <Int Dimension>
void SparseToDense_updateOutput(at::Tensor &inputSize, Metadata<Dimension> &m, void SparseToDense_updateOutput(at::Tensor &inputSize, Metadata<Dimension> &m,
at::Tensor &input_features, at::Tensor &input_features,
at::Tensor &output_features, long nPlanes) { at::Tensor &output_features, long nPlanes) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
cuda_SparseToDense_updateOutput<float, Dimension>( cuda_SparseToDense_updateOutput<float, Dimension>(
inputSize, m, input_features, output_features, nPlanes); inputSize, m, input_features, output_features, nPlanes);
else else
...@@ -598,7 +598,7 @@ void SparseToDense_updateGradInput(at::Tensor &inputSize, ...@@ -598,7 +598,7 @@ void SparseToDense_updateGradInput(at::Tensor &inputSize,
at::Tensor &input_features, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features) { at::Tensor &d_output_features) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_SparseToDense_updateGradInput<float, Dimension>( cuda_SparseToDense_updateGradInput<float, Dimension>(
inputSize, m, input_features, d_input_features, d_output_features); inputSize, m, input_features, d_input_features, d_output_features);
else else
...@@ -610,7 +610,7 @@ void UnPooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize, ...@@ -610,7 +610,7 @@ void UnPooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride, at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m, at::Tensor &input_features, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, long nFeaturesToDrop) { at::Tensor &output_features, long nFeaturesToDrop) {
if (input_features.type().is_cuda()) if (input_features.device().type() == torch::kCUDA)
cuda_UnPooling_updateOutput<float, Dimension>( cuda_UnPooling_updateOutput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features, inputSize, outputSize, poolSize, poolStride, m, input_features,
output_features, nFeaturesToDrop); output_features, nFeaturesToDrop);
...@@ -626,7 +626,7 @@ void UnPooling_updateGradInput(at::Tensor &inputSize, at::Tensor &outputSize, ...@@ -626,7 +626,7 @@ void UnPooling_updateGradInput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &d_input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &d_output_features,
long nFeaturesToDrop) { long nFeaturesToDrop) {
if (d_output_features.type().is_cuda()) if (d_output_features.device().type() == torch::kCUDA)
cuda_UnPooling_updateGradInput<float, Dimension>( cuda_UnPooling_updateGradInput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, d_input_features, inputSize, outputSize, poolSize, poolStride, m, d_input_features,
d_output_features, nFeaturesToDrop); d_output_features, nFeaturesToDrop);
......
...@@ -124,14 +124,15 @@ def batch_location_tensors(location_tensors): ...@@ -124,14 +124,15 @@ def batch_location_tensors(location_tensors):
a.append(pad_with_batch_idx(lt,batch_idx)) a.append(pad_with_batch_idx(lt,batch_idx))
return torch.cat(a,0) return torch.cat(a,0)
def prepare_BLInput(l,f): def prepare_BLInput(batch):
with torch.no_grad(): with torch.no_grad():
n=max([x.size(0) for x in l]) n=max([l.size(0) for l,f in batch])
L=torch.empty(len(l),n,l[0].size(1),dtype=torch.int64).fill_(-1) l,f=batch[0]
F=torch.zeros(len(l),n,f[0].size(1)) L=torch.empty(len(batch),n,l.size(1),dtype=torch.int64).fill_(-1)
for i, (ll, ff) in enumerate(zip(l,f)): F=torch.zeros(len(batch),n,f.size(1))
L[i,:ll.size(0),:].copy_(ll) for i, (l, f) in enumerate(batch):
F[i,:ff.size(0),:].copy_(ff) L[i,:l.size(0),:].copy_(l)
F[i,:f.size(0),:].copy_(f)
return [L,F] return [L,F]
def checkpoint_restore(model,exp_name,name2,use_cuda=True,epoch=0): def checkpoint_restore(model,exp_name,name2,use_cuda=True,epoch=0):
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment