Commit ed2a1c04 authored by Benjamin Thomas Graham's avatar Benjamin Thomas Graham
Browse files

PyTorch 1.0

parent 3aab2c31
......@@ -128,10 +128,10 @@ python VGGplus.py
## Setup
Tested with Ubuntu 16.04, Python 3.6 in [Miniconda](https://conda.io/miniconda.html) and PyTorch v0.4 (with merged Tensors/Variables).
Tested with Ubuntu 16.04, Python 3.6 in [Miniconda](https://conda.io/miniconda.html) and PyTorch 1.0.
```
conda install pytorch -c pytorch
conda install pytorch-nightly -c pytorch # See https://pytorch.org/get-started/locally/
conda install google-sparsehash -c bioconda # OR apt-get install libsparsehash-dev
conda install -c anaconda pillow
git clone git@github.com:facebookresearch/SparseConvNet.git
......
#!/bin/bash
rm -rf build/ dist/ sparseconvnet.egg-info sparseconvnet_SCN*.so
rm -rf build/ dist/ sparseconvnet.egg-info
python setup.py install
......@@ -62,7 +62,7 @@ double cpu_Convolution_updateOutput(
// auto w = weight.select(0, i);
// auto output_rows = at::mm(input_rows, w);
// output_features.index_add_(0, rt.select(1, 1), output_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
auto input_rows = at::empty({nRules, ip}, input_features.type());
rule_index_select<T>(input_rows, input_features, nRules, &r[0]);
auto w = weight.select(0, i);
auto output_rows = at::mm(input_rows, w);
......@@ -105,9 +105,9 @@ void cpu_Convolution_backward(
// at::mm_out(dw, input_rows.t(), d_output_rows);
// auto d_input_rows = at::mm(d_output_rows, w.t());
// d_input_features.index_add_(0, rt.select(1, 0), d_input_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
auto input_rows = at::empty({nRules, ip}, input_features.type());
rule_index_select<T>(input_rows, input_features, nRules, &r[0]);
auto d_output_rows = d_output_features.type().tensor({nRules, op});
auto d_output_rows = at::empty({nRules, op}, d_output_features.type());
rule_index_select<T>(d_output_rows, d_output_features, nRules, &r[1]);
at::mm_out(dw, input_rows.t(), d_output_rows);
auto d_input_rows = at::mm(d_output_rows, w.t());
......@@ -144,7 +144,7 @@ double cpu_SubmanifoldConvolution_updateOutput(
// auto w = weight.select(0, i);
// auto output_rows = at::mm(input_rows, w);
// output_features.index_add_(0, rt.select(1, 1), output_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
auto input_rows = at::empty({nRules, ip}, input_features.type());
rule_index_select<T>(input_rows, input_features, nRules, &r[0]);
auto w = weight.select(0, i);
auto output_rows = at::mm(input_rows, w);
......@@ -186,9 +186,9 @@ void cpu_SubmanifoldConvolution_backward(
// at::mm_out(dw, input_rows.t(), d_output_rows);
// auto d_input_rows = at::mm(d_output_rows, w.t());
// d_input_features.index_add_(0, rt.select(1, 0), d_input_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
auto input_rows = at::empty({nRules, ip}, input_features.type());
rule_index_select<T>(input_rows, input_features, nRules, &r[0]);
auto d_output_rows = d_output_features.type().tensor({nRules, op});
auto d_output_rows = at::empty({nRules, op}, d_output_features.type());
rule_index_select<T>(d_output_rows, d_output_features, nRules, &r[1]);
at::mm_out(dw, input_rows.t(), d_output_rows);
auto d_input_rows = at::mm(d_output_rows, w.t());
......@@ -224,7 +224,7 @@ double cpu_PermutohedralSubmanifoldConvolution_updateOutput(
// auto w = weight.select(0, i);
// auto output_rows = at::mm(input_rows, w);
// output_features.index_add_(0, rt.select(1, 1), output_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
auto input_rows = at::empty({nRules, ip}, input_features.type());
rule_index_select<T>(input_rows, input_features, nRules, &r[0]);
auto w = weight.select(0, i);
auto output_rows = at::mm(input_rows, w);
......@@ -265,9 +265,9 @@ void cpu_PermutohedralSubmanifoldConvolution_backward(
// at::mm_out(dw, input_rows.t(), d_output_rows);
// auto d_input_rows = at::mm(d_output_rows, w.t());
// d_input_features.index_add_(0, rt.select(1, 0), d_input_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
auto input_rows = at::empty({nRules, ip}, input_features.type());
rule_index_select<T>(input_rows, input_features, nRules, &r[0]);
auto d_output_rows = d_output_features.type().tensor({nRules, op});
auto d_output_rows = at::empty({nRules, op}, d_output_features.type());
rule_index_select<T>(d_output_rows, d_output_features, nRules, &r[1]);
at::mm_out(dw, input_rows.t(), d_output_rows);
auto d_input_rows = at::mm(d_output_rows, w.t());
......@@ -307,7 +307,7 @@ double cpu_FullConvolution_updateOutput(
// auto w = weight.select(0, i);
// auto output_rows = at::mm(input_rows, w);
// output_features.index_add_(0, rt.select(1, 1), output_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
auto input_rows = at::empty({nRules, ip}, input_features.type());
rule_index_select<T>(input_rows, input_features, nRules, &r[0]);
auto w = weight.select(0, i);
auto output_rows = at::mm(input_rows, w);
......@@ -352,9 +352,9 @@ void cpu_FullConvolution_backward(
// at::mm_out(dw, input_rows.t(), d_output_rows);
// auto d_input_rows = at::mm(d_output_rows, w.t());
// d_input_features.index_add_(0, rt.select(1, 0), d_input_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
auto input_rows = at::empty({nRules, ip}, input_features.type());
rule_index_select<T>(input_rows, input_features, nRules, &r[0]);
auto d_output_rows = d_output_features.type().tensor({nRules, op});
auto d_output_rows = at::empty({nRules, op}, d_output_features.type());
rule_index_select<T>(d_output_rows, d_output_features, nRules, &r[1]);
at::mm_out(dw, input_rows.t(), d_output_rows);
auto d_input_rows = at::mm(d_output_rows, w.t());
......@@ -393,7 +393,7 @@ double cpu_RandomizedStrideConvolution_updateOutput(
// auto w = weight.select(0, i);
// auto output_rows = at::mm(input_rows, w);
// output_features.index_add_(0, rt.select(1, 1), output_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
auto input_rows = at::empty({nRules, ip}, input_features.type());
rule_index_select<T>(input_rows, input_features, nRules, &r[0]);
auto w = weight.select(0, i);
auto output_rows = at::mm(input_rows, w);
......@@ -436,9 +436,9 @@ void cpu_RandomizedStrideConvolution_backward(
// at::mm_out(dw, input_rows.t(), d_output_rows);
// auto d_input_rows = at::mm(d_output_rows, w.t());
// d_input_features.index_add_(0, rt.select(1, 0), d_input_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
auto input_rows = at::empty({nRules, ip}, input_features.type());
rule_index_select<T>(input_rows, input_features, nRules, &r[0]);
auto d_output_rows = d_output_features.type().tensor({nRules, op});
auto d_output_rows = at::empty({nRules, op}, d_output_features.type());
rule_index_select<T>(d_output_rows, d_output_features, nRules, &r[1]);
at::mm_out(dw, input_rows.t(), d_output_rows);
auto d_input_rows = at::mm(d_output_rows, w.t());
......
......@@ -34,7 +34,7 @@ double cpu_Deconvolution_updateOutput(
// auto w = weight.select(0, i);
// auto output_rows = at::mm(input_rows, w);
// output_features.index_add_(0, rt.select(1, 0), output_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
auto input_rows = at::empty({nRules, ip}, input_features.type());
rule_index_select<T>(input_rows, input_features, nRules, &r[1]);
auto w = weight.select(0, i);
auto output_rows = at::mm(input_rows, w);
......@@ -77,9 +77,9 @@ void cpu_Deconvolution_backward(
// at::mm_out(dw, input_rows.t(), d_output_rows);
// auto d_input_rows = at::mm(d_output_rows, w.t());
// d_input_features.index_add_(0, rt.select(1, 1), d_input_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
auto input_rows = at::empty({nRules, ip}, d_output_features.type());
rule_index_select<T>(input_rows, input_features, nRules, &r[1]);
auto d_output_rows = d_output_features.type().tensor({nRules, op});
auto d_output_rows = at::empty({nRules, op}, d_output_features.type());
rule_index_select<T>(d_output_rows, d_output_features, nRules, &r[0]);
at::mm_out(dw, input_rows.t(), d_output_rows);
auto d_input_rows = at::mm(d_output_rows, w.t());
......
......@@ -4,7 +4,7 @@
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
#include <torch/torch.h>
#include <torch/extension.h>
#include "sparseconvnet.h"
......
......@@ -9,7 +9,7 @@
#include <omp.h>
#endif
#include <torch/torch.h>
#include <torch/extension.h>
#include "Metadata/Metadata.cpp"
template class Metadata<1>;
......
......@@ -9,10 +9,9 @@
#include <omp.h>
#endif
//#include <ATen/ATen.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <torch/torch.h>
#include <torch/extension.h>
#include "Metadata/Metadata.cpp"
template class Metadata<1>;
......
......@@ -29,8 +29,8 @@ class SparseConvNetTensor(object):
return self
return self.features.type()
def cuda(self, async=False):
self.features = self.features.cuda(async=async)
def cuda(self):
self.features = self.features.cuda()
return self
def cpu(self):
......
Markdown is supported
Attach a file by drag &amp; drop or click to upload (0%).
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment