Commit c9e5e6cd authored by Benjamin Thomas Graham

permutohedral

parent 66986767
...@@ -12,7 +12,7 @@ void rule_index_select(at::Tensor target, at::Tensor src, Int nRules,
auto s_ptr = src.data<T>();
auto n = target.size(1);
Int i;
#pragma omp parallel for private(i)
for (i = 0; i < nRules; ++i)
std::memcpy(t_ptr + i * n, s_ptr + rules[2 * i] * n, sizeof(T) * n);
}
...@@ -23,7 +23,7 @@ void rule_index_add_(at::Tensor target, at::Tensor src, Int nRules,
auto s_ptr = src.data<T>();
auto n = target.size(1);
Int i;
#pragma omp parallel for private(i)
for (i = 0; i < nRules; ++i) {
auto t = t_ptr + rules[2 * i] * n;
auto s = s_ptr + i * n;
...@@ -197,6 +197,85 @@ void cpu_SubmanifoldConvolution_backward(
}
}
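// Rulebook layout used below: each rule is an (input row, output row) pair,
// so rule i for a given offset occupies rules[2 * i] and rules[2 * i + 1].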
template <typename T, Int Dimension>
double cpu_PermutohedralSubmanifoldConvolution_updateOutput(
/*long*/ at::Tensor inputSize, Metadata<Dimension> &m,
/*float*/ at::Tensor input_features, /*float*/ at::Tensor output_features,
/*float*/ at::Tensor weight,
/*float*/ at::Tensor bias) {
auto _rules = m.getPermutohedralSubmanifoldRuleBook(inputSize, true);
Int nActive = m.getNActive(inputSize);
output_features.resize_({nActive, weight.size(2)});
if (bias.numel() and nActive)
output_features.copy_(bias);
else
output_features.zero_();
double flops = 0;
auto ip = weight.size(1);
auto op = weight.size(2);
for (Int i = 0; i < (Int)_rules.size(); i++) {
auto r = _rules[i];
int nRules = r.size() / 2;
if (nRules) {
flops += nRules * ip * op;
// auto rt = torch::CPU(at_kINT).tensorFromBlob(&r[0], {nRules, 2});
// auto input_rows = input_features.index_select(0, rt.select(1, 0));
// auto w = weight.select(0, i);
// auto output_rows = at::mm(input_rows, w);
// output_features.index_add_(0, rt.select(1, 1), output_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
rule_index_select<T>(input_rows, input_features, nRules, &r[0]);
auto w = weight.select(0, i);
auto output_rows = at::mm(input_rows, w);
rule_index_add_<T>(output_features, output_rows, nRules, &r[1]);
}
}
return flops;
}
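// The loop above is a gather-GEMM-scatter: rule_index_select gathers the
// input rows for one offset, at::mm applies that offset's weight slice, and
// rule_index_add_ scatters the products back into output_features.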
template <typename T, Int Dimension>
void cpu_PermutohedralSubmanifoldConvolution_backward(
/*long*/ at::Tensor inputSize, Metadata<Dimension> &m,
/*float*/ at::Tensor input_features,
/*float*/ at::Tensor d_input_features,
/*float*/ at::Tensor d_output_features, /*float*/ at::Tensor weight,
/*float*/ at::Tensor d_weight,
/*float*/ at::Tensor d_bias) {
auto _rules = m.getPermutohedralSubmanifoldRuleBook(inputSize, true);
Int nActive = m.getNActive(inputSize);
d_input_features.resize_as_(input_features);
d_input_features.zero_();
if (nActive and d_bias.numel())
at::sum_out(d_bias, d_output_features, {0}, false);
auto ip = weight.size(1);
auto op = weight.size(2);
for (Int i = 0; i < (Int)_rules.size(); i++) {
auto r = _rules[i];
int nRules = r.size() / 2;
if (nRules) {
auto w = weight.select(0, i);
auto dw = d_weight.select(0, i);
// auto rt = torch::CPU(at_kINT).tensorFromBlob(&r[0], {nRules, 2});
// auto input_rows = input_features.index_select(0, rt.select(1, 0));
// auto d_output_rows = d_output_features.index_select(0, rt.select(1,
// 1));
// at::mm_out(dw, input_rows.t(), d_output_rows);
// auto d_input_rows = at::mm(d_output_rows, w.t());
// d_input_features.index_add_(0, rt.select(1, 0), d_input_rows);
auto input_rows = input_features.type().tensor({nRules, ip});
rule_index_select<T>(input_rows, input_features, nRules, &r[0]);
auto d_output_rows = d_output_features.type().tensor({nRules, op});
rule_index_select<T>(d_output_rows, d_output_features, nRules, &r[1]);
at::mm_out(dw, input_rows.t(), d_output_rows);
auto d_input_rows = at::mm(d_output_rows, w.t());
rule_index_add_<T>(d_input_features, d_input_rows, nRules, &r[0]);
}
}
}
template <typename T, Int Dimension>
double cpu_FullConvolution_updateOutput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
...
...@@ -151,6 +151,67 @@ void cuda_SubmanifoldConvolution_backward(
}
}
template <typename T, Int Dimension>
double cuda_PermutohedralSubmanifoldConvolution_updateOutput(
/*long*/ at::Tensor inputSize, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features, /*cuda float*/ at::Tensor weight,
/*cuda float*/ at::Tensor bias) {
auto _rules = m.getPermutohedralSubmanifoldRuleBook(inputSize, true);
Int nActive = m.getNActive(inputSize);
if (nActive) {
Int ip = weight.size(1);
Int op = weight.size(2);
output_features.resize_({nActive, op});
auto iF = input_features.data<T>();
auto oF = output_features.data<T>();
auto w = weight.data<T>();
if (bias.numel())
Convolution_fp_bias(oF, bias.data<T>(), op, nActive);
else
output_features.zero_();
return dConvolution_forward2<T>(iF, oF, w, _rules, ip, ip, op, op);
} else {
return 0;
}
}
template <typename T, Int Dimension>
void cuda_PermutohedralSubmanifoldConvolution_backward(
/*long*/ at::Tensor inputSize, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features,
/*cuda float*/ at::Tensor weight, /*cuda float*/ at::Tensor d_weight,
/*cuda float*/ at::Tensor d_bias) {
auto _rules = m.getPermutohedralSubmanifoldRuleBook(inputSize, true);
Int nActive = m.getNActive(inputSize);
if (nActive) {
Int ip = weight.size(1);
Int op = weight.size(2);
d_input_features.resize_({nActive, ip});
d_input_features.zero_();
auto iF = input_features.data<T>();
auto diF = d_input_features.data<T>();
auto doF = d_output_features.data<T>();
auto w = weight.data<T>();
auto dw = d_weight.data<T>();
dConvolution_backward_dW2<T>(iF, diF, doF, w, dw, _rules, ip, ip, op, op);
if (d_bias.numel()) {
auto db = d_bias.data<T>();
Convolution_bp_bias(doF, db, op, nActive);
}
}
}
template <typename T, Int Dimension>
double cuda_FullConvolution_updateOutput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
...
...@@ -10,15 +10,15 @@
#include "ConvolutionRules.h"
#include "FullConvolutionRules.h"
#include "IOLayersRules.h"
#include "PermutohedralSubmanifoldConvolutionRules.h"
#include "RandomizedStrideRules.h"
#include "SubmanifoldConvolutionRules.h"
template <Int dimension> SparseGrid<dimension>::SparseGrid() : ctr(0) {
// Sparsehash needs a key to be set aside and never used
Point<dimension> empty_key;
for (Int i = 0; i < dimension; ++i)
empty_key[i] = std::numeric_limits<Int>::min();
mp.set_empty_key(empty_key);
}
...@@ -53,7 +53,7 @@ template <Int dimension> void Metadata<dimension>::clear() {
grids.clear();
activePoolingRuleBooks.clear();
inputLayerRuleBook.clear();
submanifoldRuleBooks.clear();
ruleBooks.clear();
fullConvolutionRuleBook.clear();
sparseToDenseRuleBooks.clear();
...@@ -260,7 +260,7 @@ Metadata<dimension>::sparsifyCompare(Metadata<dimension> &mReference,
Metadata<dimension> &mSparsified,
/*long*/ at::Tensor spatialSize) {
auto p = LongTensorToPoint<dimension>(spatialSize);
at::Tensor delta = at::zeros({nActive[p]}, torch::CPU(at::kFloat));
float *deltaPtr = delta.data<float>();
auto &sgsReference = mReference.grids[p];
auto &sgsFull = grids[p];
...@@ -356,7 +356,7 @@ template <Int dimension> void Metadata<dimension>::generateRuleBooks3s2() {
}
while (true) {
auto &SGs = grids[p1];
auto &rb = submanifoldRuleBooks[p2];
if (rb.empty())
SubmanifoldConvolution_SgsToRules(SGs, rb, sz);
for (Int i = 0; i < dimension; ++i)
...@@ -387,7 +387,7 @@ template <Int dimension> void Metadata<dimension>::generateRuleBooks2s2() {
}
while (true) {
auto &SGs = grids[p1];
auto &rb = submanifoldRuleBooks[p2];
if (rb.empty())
SubmanifoldConvolution_SgsToRules(SGs, rb, s3);
for (Int i = 0; i < dimension; ++i)
...@@ -434,7 +434,7 @@ RuleBook &Metadata<dimension>::getSubmanifoldRuleBook(
/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor size, bool openMP) {
auto p = TwoLongTensorsToPoint<dimension>(spatialSize, size);
auto &rb = submanifoldRuleBooks[p];
if (rb.empty()) {
auto &SGs = grids[LongTensorToPoint<dimension>(spatialSize)];
#if defined(ENABLE_OPENMP)
...@@ -445,6 +445,20 @@ RuleBook &Metadata<dimension>::getSubmanifoldRuleBook(
return rb;
}
template <Int dimension>
RuleBook &Metadata<dimension>::getPermutohedralSubmanifoldRuleBook(
/*long*/ at::Tensor spatialSize, bool openMP) {
auto p = LongTensorToPoint<dimension>(spatialSize);
auto &rb = permutohedralRuleBooks[p];
if (rb.empty()) {
auto &SGs = grids[LongTensorToPoint<dimension>(spatialSize)];
#if defined(ENABLE_OPENMP)
openMP ? PermutohedralSubmanifoldConvolution_SgsToRules_OMP(SGs, rb) :
#endif
PermutohedralSubmanifoldConvolution_SgsToRules(SGs, rb);
}
return rb;
}
template <Int dimension>
RuleBook &Metadata<dimension>::getActivePoolingRuleBook(
/*long*/ at::Tensor spatialSize) {
auto spatialSz = LongTensorToPoint<dimension>(spatialSize);
...@@ -549,6 +563,42 @@ RuleBook &Metadata<dimension>::getRandomizedStrideRuleBook(
return rb;
}
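// compareSparseHelper (defined below) returns four index tensors: sites
// active in both metadata objects (cL_, cR_) and sites active only on the
// left (L_) or only on the right (R_).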
template <Int dimension>
std::vector<at::Tensor>
Metadata<dimension>::compareSparseHelper(Metadata<dimension> &mR,
/* long */ at::Tensor spatialSize) {
auto p = LongTensorToPoint<dimension>(spatialSize);
auto &sgsL = grids[p];
auto &sgsR = mR.grids[p];
std::vector<long> cL, cR, L, R;
for (Int sample = 0; sample < (Int)sgsL.size(); ++sample) {
auto &sgL = sgsL[sample];
auto &sgR = sgsR[sample];
for (auto const &iter : sgL.mp) {
if (sgR.mp.find(iter.first) == sgR.mp.end()) {
L.push_back(sgL.mp[iter.first] + sgL.ctr);
} else {
cL.push_back(sgL.mp[iter.first] + sgL.ctr);
cR.push_back(sgR.mp[iter.first] + sgR.ctr);
}
}
for (auto const &iter : sgR.mp) {
if (sgL.mp.find(iter.first) == sgL.mp.end()) {
R.push_back(sgR.mp[iter.first] + sgR.ctr);
}
}
}
at::Tensor cL_ = torch::CPU(at::kLong).tensor({(long)cL.size()});
std::memcpy(cL_.data<long>(), &cL[0], cL.size() * sizeof(long));
at::Tensor cR_ = torch::CPU(at::kLong).tensor({(long)cR.size()});
std::memcpy(cR_.data<long>(), &cR[0], cR.size() * sizeof(long));
at::Tensor L_ = torch::CPU(at::kLong).tensor({(long)L.size()});
std::memcpy(L_.data<long>(), &L[0], L.size() * sizeof(long));
at::Tensor R_ = torch::CPU(at::kLong).tensor({(long)R.size()});
std::memcpy(R_.data<long>(), &R[0], R.size() * sizeof(long));
return {cL_, cR_, L_, R_};
}
template <Int dimension> Int volume(long *point) {
Int v = 1;
for (Int i = 0; i < dimension; i++)
...
...@@ -9,10 +9,12 @@
#include "32bits.h"
#include <algorithm>
#include <array>
#include <cassert>
#include <chrono>
#include <cstdint>
#include <google/dense_hash_map>
#include <iostream>
#include <limits>
#include <numeric>
#include <random>
#include <string>
...@@ -57,7 +59,10 @@ public:
std::unordered_map<Point<2 * dimension>, RuleBook,
IntArrayHash<2 * dimension>>
submanifoldRuleBooks;
std::unordered_map<Point<dimension>, RuleBook, IntArrayHash<dimension>>
permutohedralRuleBooks;
std::unordered_map<Point<3 * dimension>, RuleBook,
IntArrayHash<3 * dimension>>
...@@ -125,6 +130,8 @@ public:
Int mode);
RuleBook &getSubmanifoldRuleBook(/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor size, bool openMP);
RuleBook &getPermutohedralSubmanifoldRuleBook(/*long*/ at::Tensor spatialSize,
bool openMP);
RuleBook &getActivePoolingRuleBook(/*long*/ at::Tensor spatialSize);
RuleBook &getSparseToDenseRuleBook(/*long*/ at::Tensor spatialSize,
bool openMP);
...@@ -143,6 +150,10 @@ public:
/*long*/ at::Tensor size,
/*long*/ at::Tensor stride,
bool openMP);
std::vector<at::Tensor>
compareSparseHelper(Metadata<dimension> &mR,
/* long */ at::Tensor spatialSize);
};
template <typename T> T *OptionalTensorData(at::Tensor tensor);
...
// Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
#ifndef PERMUTOHEDRALSUBMANIFOLDCONVOLUTIONRULES_H
#define PERMUTOHEDRALSUBMANIFOLDCONVOLUTIONRULES_H
// N=10
// import torch
// a=torch.zeros(N,N)
// for i in range(N):
// for j in range(i):
// dp=(a[i,:]*a[j,:]).sum()
// a[i,j]=(0.5-dp)/a[j,j]
// dp=(a[i,:]*a[i,:]).sum()
// a[i,i]=(1-dp)**0.5
// ai=torch.inverse(a)
// r=1
// for dim in range(1,N+1):
// c=torch.arange((2*r+1)**dim).long()[:,None].expand(-1,dim)
// c=c/((2*r+1)**torch.arange(0,dim).long())
// c%=2*r+1
// c-=r
// c=c.float()
// for x in c:
// v=(x[:,None]*a[:dim,:dim]).sum(0)
// m=(v*v).sum().item()
// if m<=r**2+0.01:
// print(v)
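// For dimension d the search above yields d*d + d + 1 offsets
// (3, 7, 13, 21, 31, 43, 57, 73, 91, 111 for d = 1..10); this is the
// filter_volume used on the Python side.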
std::vector<std::vector<std::vector<Int>>> permutohedralOffsets = {
{},
{{0}, {-1}, {1}},
{{0, -1}, {1, -1}, {-1, 0}, {0, 0}, {1, 0}, {-1, 1}, {0, 1}},
{{0, 0, 0},
{0, 0, -1},
{1, 0, -1},
{0, 1, -1},
{0, -1, 0},
{1, -1, 0},
{-1, 0, 0},
{1, 0, 0},
{-1, 1, 0},
{0, 1, 0},
{0, -1, 1},
{-1, 0, 1},
{0, 0, 1}},
{{0, 0, 0, 0}, {0, 0, 0, -1}, {1, 0, 0, -1}, {0, 1, 0, -1}, {0, 0, 1, -1},
{0, 0, -1, 0}, {1, 0, -1, 0}, {0, 1, -1, 0}, {0, -1, 0, 0}, {1, -1, 0, 0},
{-1, 0, 0, 0}, {1, 0, 0, 0}, {-1, 1, 0, 0}, {0, 1, 0, 0}, {0, -1, 1, 0},
{-1, 0, 1, 0}, {0, 0, 1, 0}, {0, 0, -1, 1}, {0, -1, 0, 1}, {-1, 0, 0, 1},
{0, 0, 0, 1}},
{{0, 0, 0, 0, 0}, {0, 0, 0, 0, -1}, {1, 0, 0, 0, -1}, {0, 1, 0, 0, -1},
{0, 0, 1, 0, -1}, {0, 0, 0, 1, -1}, {0, 0, 0, -1, 0}, {1, 0, 0, -1, 0},
{0, 1, 0, -1, 0}, {0, 0, 1, -1, 0}, {0, 0, -1, 0, 0}, {1, 0, -1, 0, 0},
{0, 1, -1, 0, 0}, {0, -1, 0, 0, 0}, {1, -1, 0, 0, 0}, {-1, 0, 0, 0, 0},
{1, 0, 0, 0, 0}, {-1, 1, 0, 0, 0}, {0, 1, 0, 0, 0}, {0, -1, 1, 0, 0},
{-1, 0, 1, 0, 0}, {0, 0, 1, 0, 0}, {0, 0, -1, 1, 0}, {0, -1, 0, 1, 0},
{-1, 0, 0, 1, 0}, {0, 0, 0, 1, 0}, {0, 0, 0, -1, 1}, {0, 0, -1, 0, 1},
{0, -1, 0, 0, 1}, {-1, 0, 0, 0, 1}, {0, 0, 0, 0, 1}},
{{0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, -1}, {1, 0, 0, 0, 0, -1},
{0, 1, 0, 0, 0, -1}, {0, 0, 1, 0, 0, -1}, {0, 0, 0, 1, 0, -1},
{0, 0, 0, 0, 1, -1}, {0, 0, 0, 0, -1, 0}, {1, 0, 0, 0, -1, 0},
{0, 1, 0, 0, -1, 0}, {0, 0, 1, 0, -1, 0}, {0, 0, 0, 1, -1, 0},
{0, 0, 0, -1, 0, 0}, {1, 0, 0, -1, 0, 0}, {0, 1, 0, -1, 0, 0},
{0, 0, 1, -1, 0, 0}, {0, 0, -1, 0, 0, 0}, {1, 0, -1, 0, 0, 0},
{0, 1, -1, 0, 0, 0}, {0, -1, 0, 0, 0, 0}, {1, -1, 0, 0, 0, 0},
{-1, 0, 0, 0, 0, 0}, {1, 0, 0, 0, 0, 0}, {-1, 1, 0, 0, 0, 0},
{0, 1, 0, 0, 0, 0}, {0, -1, 1, 0, 0, 0}, {-1, 0, 1, 0, 0, 0},
{0, 0, 1, 0, 0, 0}, {0, 0, -1, 1, 0, 0}, {0, -1, 0, 1, 0, 0},
{-1, 0, 0, 1, 0, 0}, {0, 0, 0, 1, 0, 0}, {0, 0, 0, -1, 1, 0},
{0, 0, -1, 0, 1, 0}, {0, -1, 0, 0, 1, 0}, {-1, 0, 0, 0, 1, 0},
{0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, -1, 1}, {0, 0, 0, -1, 0, 1},
{0, 0, -1, 0, 0, 1}, {0, -1, 0, 0, 0, 1}, {-1, 0, 0, 0, 0, 1},
{0, 0, 0, 0, 0, 1}},
{{0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, -1}, {1, 0, 0, 0, 0, 0, -1},
{0, 1, 0, 0, 0, 0, -1}, {0, 0, 1, 0, 0, 0, -1}, {0, 0, 0, 1, 0, 0, -1},
{0, 0, 0, 0, 1, 0, -1}, {0, 0, 0, 0, 0, 1, -1}, {0, 0, 0, 0, 0, -1, 0},
{1, 0, 0, 0, 0, -1, 0}, {0, 1, 0, 0, 0, -1, 0}, {0, 0, 1, 0, 0, -1, 0},
{0, 0, 0, 1, 0, -1, 0}, {0, 0, 0, 0, 1, -1, 0}, {0, 0, 0, 0, -1, 0, 0},
{1, 0, 0, 0, -1, 0, 0}, {0, 1, 0, 0, -1, 0, 0}, {0, 0, 1, 0, -1, 0, 0},
{0, 0, 0, 1, -1, 0, 0}, {0, 0, 0, -1, 0, 0, 0}, {1, 0, 0, -1, 0, 0, 0},
{0, 1, 0, -1, 0, 0, 0}, {0, 0, 1, -1, 0, 0, 0}, {0, 0, -1, 0, 0, 0, 0},
{1, 0, -1, 0, 0, 0, 0}, {0, 1, -1, 0, 0, 0, 0}, {0, -1, 0, 0, 0, 0, 0},
{1, -1, 0, 0, 0, 0, 0}, {-1, 0, 0, 0, 0, 0, 0}, {1, 0, 0, 0, 0, 0, 0},
{-1, 1, 0, 0, 0, 0, 0}, {0, 1, 0, 0, 0, 0, 0}, {0, -1, 1, 0, 0, 0, 0},
{-1, 0, 1, 0, 0, 0, 0}, {0, 0, 1, 0, 0, 0, 0}, {0, 0, -1, 1, 0, 0, 0},
{0, -1, 0, 1, 0, 0, 0}, {-1, 0, 0, 1, 0, 0, 0}, {0, 0, 0, 1, 0, 0, 0},
{0, 0, 0, -1, 1, 0, 0}, {0, 0, -1, 0, 1, 0, 0}, {0, -1, 0, 0, 1, 0, 0},
{-1, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, -1, 1, 0},
{0, 0, 0, -1, 0, 1, 0}, {0, 0, -1, 0, 0, 1, 0}, {0, -1, 0, 0, 0, 1, 0},
{-1, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 0, -1, 1},
{0, 0, 0, 0, -1, 0, 1}, {0, 0, 0, -1, 0, 0, 1}, {0, 0, -1, 0, 0, 0, 1},
{0, -1, 0, 0, 0, 0, 1}, {-1, 0, 0, 0, 0, 0, 1}, {0, 0, 0, 0, 0, 0, 1}},
{{0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, -1},
{1, 0, 0, 0, 0, 0, 0, -1}, {0, 1, 0, 0, 0, 0, 0, -1},
{0, 0, 1, 0, 0, 0, 0, -1}, {0, 0, 0, 1, 0, 0, 0, -1},
{0, 0, 0, 0, 1, 0, 0, -1}, {0, 0, 0, 0, 0, 1, 0, -1},
{0, 0, 0, 0, 0, 0, 1, -1}, {0, 0, 0, 0, 0, 0, -1, 0},
{1, 0, 0, 0, 0, 0, -1, 0}, {0, 1, 0, 0, 0, 0, -1, 0},
{0, 0, 1, 0, 0, 0, -1, 0}, {0, 0, 0, 1, 0, 0, -1, 0},
{0, 0, 0, 0, 1, 0, -1, 0}, {0, 0, 0, 0, 0, 1, -1, 0},
{0, 0, 0, 0, 0, -1, 0, 0}, {1, 0, 0, 0, 0, -1, 0, 0},
{0, 1, 0, 0, 0, -1, 0, 0}, {0, 0, 1, 0, 0, -1, 0, 0},
{0, 0, 0, 1, 0, -1, 0, 0}, {0, 0, 0, 0, 1, -1, 0, 0},
{0, 0, 0, 0, -1, 0, 0, 0}, {1, 0, 0, 0, -1, 0, 0, 0},
{0, 1, 0, 0, -1, 0, 0, 0}, {0, 0, 1, 0, -1, 0, 0, 0},
{0, 0, 0, 1, -1, 0, 0, 0}, {0, 0, 0, -1, 0, 0, 0, 0},
{1, 0, 0, -1, 0, 0, 0, 0}, {0, 1, 0, -1, 0, 0, 0, 0},
{0, 0, 1, -1, 0, 0, 0, 0}, {0, 0, -1, 0, 0, 0, 0, 0},
{1, 0, -1, 0, 0, 0, 0, 0}, {0, 1, -1, 0, 0, 0, 0, 0},
{0, -1, 0, 0, 0, 0, 0, 0}, {1, -1, 0, 0, 0, 0, 0, 0},
{-1, 0, 0, 0, 0, 0, 0, 0}, {1, 0, 0, 0, 0, 0, 0, 0},
{-1, 1, 0, 0, 0, 0, 0, 0}, {0, 1, 0, 0, 0, 0, 0, 0},
{0, -1, 1, 0, 0, 0, 0, 0}, {-1, 0, 1, 0, 0, 0, 0, 0},
{0, 0, 1, 0, 0, 0, 0, 0}, {0, 0, -1, 1, 0, 0, 0, 0},
{0, -1, 0, 1, 0, 0, 0, 0}, {-1, 0, 0, 1, 0, 0, 0, 0},
{0, 0, 0, 1, 0, 0, 0, 0}, {0, 0, 0, -1, 1, 0, 0, 0},
{0, 0, -1, 0, 1, 0, 0, 0}, {0, -1, 0, 0, 1, 0, 0, 0},
{-1, 0, 0, 0, 1, 0, 0, 0}, {0, 0, 0, 0, 1, 0, 0, 0},
{0, 0, 0, 0, -1, 1, 0, 0}, {0, 0, 0, -1, 0, 1, 0, 0},
{0, 0, -1, 0, 0, 1, 0, 0}, {0, -1, 0, 0, 0, 1, 0, 0},
{-1, 0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 0, 1, 0, 0},
{0, 0, 0, 0, 0, -1, 1, 0}, {0, 0, 0, 0, -1, 0, 1, 0},
{0, 0, 0, -1, 0, 0, 1, 0}, {0, 0, -1, 0, 0, 0, 1, 0},
{0, -1, 0, 0, 0, 0, 1, 0}, {-1, 0, 0, 0, 0, 0, 1, 0},
{0, 0, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 0, 0, -1, 1},
{0, 0, 0, 0, 0, -1, 0, 1}, {0, 0, 0, 0, -1, 0, 0, 1},
{0, 0, 0, -1, 0, 0, 0, 1}, {0, 0, -1, 0, 0, 0, 0, 1},
{0, -1, 0, 0, 0, 0, 0, 1}, {-1, 0, 0, 0, 0, 0, 0, 1},
{0, 0, 0, 0, 0, 0, 0, 1}},
{{0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, -1},
{1, 0, 0, 0, 0, 0, 0, 0, -1}, {0, 1, 0, 0, 0, 0, 0, 0, -1},
{0, 0, 1, 0, 0, 0, 0, 0, -1}, {0, 0, 0, 1, 0, 0, 0, 0, -1},
{0, 0, 0, 0, 1, 0, 0, 0, -1}, {0, 0, 0, 0, 0, 1, 0, 0, -1},
{0, 0, 0, 0, 0, 0, 1, 0, -1}, {0, 0, 0, 0, 0, 0, 0, 1, -1},
{0, 0, 0, 0, 0, 0, 0, -1, 0}, {1, 0, 0, 0, 0, 0, 0, -1, 0},
{0, 1, 0, 0, 0, 0, 0, -1, 0}, {0, 0, 1, 0, 0, 0, 0, -1, 0},
{0, 0, 0, 1, 0, 0, 0, -1, 0}, {0, 0, 0, 0, 1, 0, 0, -1, 0},
{0, 0, 0, 0, 0, 1, 0, -1, 0}, {0, 0, 0, 0, 0, 0, 1, -1, 0},
{0, 0, 0, 0, 0, 0, -1, 0, 0}, {1, 0, 0, 0, 0, 0, -1, 0, 0},
{0, 1, 0, 0, 0, 0, -1, 0, 0}, {0, 0, 1, 0, 0, 0, -1, 0, 0},
{0, 0, 0, 1, 0, 0, -1, 0, 0}, {0, 0, 0, 0, 1, 0, -1, 0, 0},
{0, 0, 0, 0, 0, 1, -1, 0, 0}, {0, 0, 0, 0, 0, -1, 0, 0, 0},
{1, 0, 0, 0, 0, -1, 0, 0, 0}, {0, 1, 0, 0, 0, -1, 0, 0, 0},
{0, 0, 1, 0, 0, -1, 0, 0, 0}, {0, 0, 0, 1, 0, -1, 0, 0, 0},
{0, 0, 0, 0, 1, -1, 0, 0, 0}, {0, 0, 0, 0, -1, 0, 0, 0, 0},
{1, 0, 0, 0, -1, 0, 0, 0, 0}, {0, 1, 0, 0, -1, 0, 0, 0, 0},
{0, 0, 1, 0, -1, 0, 0, 0, 0}, {0, 0, 0, 1, -1, 0, 0, 0, 0},
{0, 0, 0, -1, 0, 0, 0, 0, 0}, {1, 0, 0, -1, 0, 0, 0, 0, 0},
{0, 1, 0, -1, 0, 0, 0, 0, 0}, {0, 0, 1, -1, 0, 0, 0, 0, 0},
{0, 0, -1, 0, 0, 0, 0, 0, 0}, {1, 0, -1, 0, 0, 0, 0, 0, 0},
{0, 1, -1, 0, 0, 0, 0, 0, 0}, {0, -1, 0, 0, 0, 0, 0, 0, 0},
{1, -1, 0, 0, 0, 0, 0, 0, 0}, {-1, 0, 0, 0, 0, 0, 0, 0, 0},
{1, 0, 0, 0, 0, 0, 0, 0, 0}, {-1, 1, 0, 0, 0, 0, 0, 0, 0},
{0, 1, 0, 0, 0, 0, 0, 0, 0}, {0, -1, 1, 0, 0, 0, 0, 0, 0},
{-1, 0, 1, 0, 0, 0, 0, 0, 0}, {0, 0, 1, 0, 0, 0, 0, 0, 0},
{0, 0, -1, 1, 0, 0, 0, 0, 0}, {0, -1, 0, 1, 0, 0, 0, 0, 0},
{-1, 0, 0, 1, 0, 0, 0, 0, 0}, {0, 0, 0, 1, 0, 0, 0, 0, 0},
{0, 0, 0, -1, 1, 0, 0, 0, 0}, {0, 0, -1, 0, 1, 0, 0, 0, 0},
{0, -1, 0, 0, 1, 0, 0, 0, 0}, {-1, 0, 0, 0, 1, 0, 0, 0, 0},
{0, 0, 0, 0, 1, 0, 0, 0, 0}, {0, 0, 0, 0, -1, 1, 0, 0, 0},
{0, 0, 0, -1, 0, 1, 0, 0, 0}, {0, 0, -1, 0, 0, 1, 0, 0, 0},
{0, -1, 0, 0, 0, 1, 0, 0, 0}, {-1, 0, 0, 0, 0, 1, 0, 0, 0},
{0, 0, 0, 0, 0, 1, 0, 0, 0}, {0, 0, 0, 0, 0, -1, 1, 0, 0},
{0, 0, 0, 0, -1, 0, 1, 0, 0}, {0, 0, 0, -1, 0, 0, 1, 0, 0},
{0, 0, -1, 0, 0, 0, 1, 0, 0}, {0, -1, 0, 0, 0, 0, 1, 0, 0},
{-1, 0, 0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 0, 0, 1, 0, 0},
{0, 0, 0, 0, 0, 0, -1, 1, 0}, {0, 0, 0, 0, 0, -1, 0, 1, 0},
{0, 0, 0, 0, -1, 0, 0, 1, 0}, {0, 0, 0, -1, 0, 0, 0, 1, 0},
{0, 0, -1, 0, 0, 0, 0, 1, 0}, {0, -1, 0, 0, 0, 0, 0, 1, 0},
{-1, 0, 0, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 0, 0, 0, 1, 0},
{0, 0, 0, 0, 0, 0, 0, -1, 1}, {0, 0, 0, 0, 0, 0, -1, 0, 1},
{0, 0, 0, 0, 0, -1, 0, 0, 1}, {0, 0, 0, 0, -1, 0, 0, 0, 1},
{0, 0, 0, -1, 0, 0, 0, 0, 1}, {0, 0, -1, 0, 0, 0, 0, 0, 1},
{0, -1, 0, 0, 0, 0, 0, 0, 1}, {-1, 0, 0, 0, 0, 0, 0, 0, 1},
{0, 0, 0, 0, 0, 0, 0, 0, 1}},
{{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 0, 0, -1},
{1, 0, 0, 0, 0, 0, 0, 0, 0, -1}, {0, 1, 0, 0, 0, 0, 0, 0, 0, -1},
{0, 0, 1, 0, 0, 0, 0, 0, 0, -1}, {0, 0, 0, 1, 0, 0, 0, 0, 0, -1},
{0, 0, 0, 0, 1, 0, 0, 0, 0, -1}, {0, 0, 0, 0, 0, 1, 0, 0, 0, -1},
{0, 0, 0, 0, 0, 0, 1, 0, 0, -1}, {0, 0, 0, 0, 0, 0, 0, 1, 0, -1},
{0, 0, 0, 0, 0, 0, 0, 0, 1, -1}, {0, 0, 0, 0, 0, 0, 0, 0, -1, 0},
{1, 0, 0, 0, 0, 0, 0, 0, -1, 0}, {0, 1, 0, 0, 0, 0, 0, 0, -1, 0},
{0, 0, 1, 0, 0, 0, 0, 0, -1, 0}, {0, 0, 0, 1, 0, 0, 0, 0, -1, 0},
{0, 0, 0, 0, 1, 0, 0, 0, -1, 0}, {0, 0, 0, 0, 0, 1, 0, 0, -1, 0},
{0, 0, 0, 0, 0, 0, 1, 0, -1, 0}, {0, 0, 0, 0, 0, 0, 0, 1, -1, 0},
{0, 0, 0, 0, 0, 0, 0, -1, 0, 0}, {1, 0, 0, 0, 0, 0, 0, -1, 0, 0},
{0, 1, 0, 0, 0, 0, 0, -1, 0, 0}, {0, 0, 1, 0, 0, 0, 0, -1, 0, 0},
{0, 0, 0, 1, 0, 0, 0, -1, 0, 0}, {0, 0, 0, 0, 1, 0, 0, -1, 0, 0},
{0, 0, 0, 0, 0, 1, 0, -1, 0, 0}, {0, 0, 0, 0, 0, 0, 1, -1, 0, 0},
{0, 0, 0, 0, 0, 0, -1, 0, 0, 0}, {1, 0, 0, 0, 0, 0, -1, 0, 0, 0},
{0, 1, 0, 0, 0, 0, -1, 0, 0, 0}, {0, 0, 1, 0, 0, 0, -1, 0, 0, 0},
{0, 0, 0, 1, 0, 0, -1, 0, 0, 0}, {0, 0, 0, 0, 1, 0, -1, 0, 0, 0},
{0, 0, 0, 0, 0, 1, -1, 0, 0, 0}, {0, 0, 0, 0, 0, -1, 0, 0, 0, 0},
{1, 0, 0, 0, 0, -1, 0, 0, 0, 0}, {0, 1, 0, 0, 0, -1, 0, 0, 0, 0},
{0, 0, 1, 0, 0, -1, 0, 0, 0, 0}, {0, 0, 0, 1, 0, -1, 0, 0, 0, 0},
{0, 0, 0, 0, 1, -1, 0, 0, 0, 0}, {0, 0, 0, 0, -1, 0, 0, 0, 0, 0},
{1, 0, 0, 0, -1, 0, 0, 0, 0, 0}, {0, 1, 0, 0, -1, 0, 0, 0, 0, 0},
{0, 0, 1, 0, -1, 0, 0, 0, 0, 0}, {0, 0, 0, 1, -1, 0, 0, 0, 0, 0},
{0, 0, 0, -1, 0, 0, 0, 0, 0, 0}, {1, 0, 0, -1, 0, 0, 0, 0, 0, 0},
{0, 1, 0, -1, 0, 0, 0, 0, 0, 0}, {0, 0, 1, -1, 0, 0, 0, 0, 0, 0},
{0, 0, -1, 0, 0, 0, 0, 0, 0, 0}, {1, 0, -1, 0, 0, 0, 0, 0, 0, 0},
{0, 1, -1, 0, 0, 0, 0, 0, 0, 0}, {0, -1, 0, 0, 0, 0, 0, 0, 0, 0},
{1, -1, 0, 0, 0, 0, 0, 0, 0, 0}, {-1, 0, 0, 0, 0, 0, 0, 0, 0, 0},
{1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, {-1, 1, 0, 0, 0, 0, 0, 0, 0, 0},
{0, 1, 0, 0, 0, 0, 0, 0, 0, 0}, {0, -1, 1, 0, 0, 0, 0, 0, 0, 0},
{-1, 0, 1, 0, 0, 0, 0, 0, 0, 0}, {0, 0, 1, 0, 0, 0, 0, 0, 0, 0},
{0, 0, -1, 1, 0, 0, 0, 0, 0, 0}, {0, -1, 0, 1, 0, 0, 0, 0, 0, 0},
{-1, 0, 0, 1, 0, 0, 0, 0, 0, 0}, {0, 0, 0, 1, 0, 0, 0, 0, 0, 0},
{0, 0, 0, -1, 1, 0, 0, 0, 0, 0}, {0, 0, -1, 0, 1, 0, 0, 0, 0, 0},
{0, -1, 0, 0, 1, 0, 0, 0, 0, 0}, {-1, 0, 0, 0, 1, 0, 0, 0, 0, 0},
{0, 0, 0, 0, 1, 0, 0, 0, 0, 0}, {0, 0, 0, 0, -1, 1, 0, 0, 0, 0},
{0, 0, 0, -1, 0, 1, 0, 0, 0, 0}, {0, 0, -1, 0, 0, 1, 0, 0, 0, 0},
{0, -1, 0, 0, 0, 1, 0, 0, 0, 0}, {-1, 0, 0, 0, 0, 1, 0, 0, 0, 0},
{0, 0, 0, 0, 0, 1, 0, 0, 0, 0}, {0, 0, 0, 0, 0, -1, 1, 0, 0, 0},
{0, 0, 0, 0, -1, 0, 1, 0, 0, 0}, {0, 0, 0, -1, 0, 0, 1, 0, 0, 0},
{0, 0, -1, 0, 0, 0, 1, 0, 0, 0}, {0, -1, 0, 0, 0, 0, 1, 0, 0, 0},
{-1, 0, 0, 0, 0, 0, 1, 0, 0, 0}, {0, 0, 0, 0, 0, 0, 1, 0, 0, 0},
{0, 0, 0, 0, 0, 0, -1, 1, 0, 0}, {0, 0, 0, 0, 0, -1, 0, 1, 0, 0},
{0, 0, 0, 0, -1, 0, 0, 1, 0, 0}, {0, 0, 0, -1, 0, 0, 0, 1, 0, 0},
{0, 0, -1, 0, 0, 0, 0, 1, 0, 0}, {0, -1, 0, 0, 0, 0, 0, 1, 0, 0},
{-1, 0, 0, 0, 0, 0, 0, 1, 0, 0}, {0, 0, 0, 0, 0, 0, 0, 1, 0, 0},
{0, 0, 0, 0, 0, 0, 0, -1, 1, 0}, {0, 0, 0, 0, 0, 0, -1, 0, 1, 0},
{0, 0, 0, 0, 0, -1, 0, 0, 1, 0}, {0, 0, 0, 0, -1, 0, 0, 0, 1, 0},
{0, 0, 0, -1, 0, 0, 0, 0, 1, 0}, {0, 0, -1, 0, 0, 0, 0, 0, 1, 0},
{0, -1, 0, 0, 0, 0, 0, 0, 1, 0}, {-1, 0, 0, 0, 0, 0, 0, 0, 1, 0},
{0, 0, 0, 0, 0, 0, 0, 0, 1, 0}, {0, 0, 0, 0, 0, 0, 0, 0, -1, 1},
{0, 0, 0, 0, 0, 0, 0, -1, 0, 1}, {0, 0, 0, 0, 0, 0, -1, 0, 0, 1},
{0, 0, 0, 0, 0, -1, 0, 0, 0, 1}, {0, 0, 0, 0, -1, 0, 0, 0, 0, 1},
{0, 0, 0, -1, 0, 0, 0, 0, 0, 1}, {0, 0, -1, 0, 0, 0, 0, 0, 0, 1},
{0, -1, 0, 0, 0, 0, 0, 0, 0, 1}, {-1, 0, 0, 0, 0, 0, 0, 0, 0, 1},
{0, 0, 0, 0, 0, 0, 0, 0, 0, 1}}};
template <Int dimension> class PermutohedralRegionIterator;
template <Int dimension> class PermutohedralRegion {
public:
Point<dimension> x;
PermutohedralRegion(const Point<dimension> &x) : x(x) {
assert(dimension <= 10);
}
PermutohedralRegionIterator<dimension> begin() {
return PermutohedralRegionIterator<dimension>(*this, x);
}
PermutohedralRegionIterator<dimension> end() {
// Not really used by the custom operator!= function
return PermutohedralRegionIterator<dimension>(*this, x);
}
};
template <Int dimension> class PermutohedralRegionIterator {
private:
PermutohedralRegion<dimension> &region;
unsigned int offset;
public:
Point<dimension> point;
bool stillLooping;
PermutohedralRegionIterator(PermutohedralRegion<dimension> &region,
Point<dimension> &point)
: region(region), offset(0), point(point), stillLooping(true) {}
PermutohedralRegionIterator<dimension> &operator++() {
auto offsets = permutohedralOffsets[dimension];
if (++offset == offsets.size()) {
stillLooping = false; // Signal to operator!= to end iteration
} else {
auto delta = offsets[offset];
for (Int i = 0; i < dimension; ++i)
point[i] = region.x[i] + delta[i];
}
return *this;
}
Point<dimension> &operator*() { return point; }
};
// Only to be used for checking the end point of range based for loops.
template <Int dimension>
inline bool operator!=(const PermutohedralRegionIterator<dimension> &lhs,
const PermutohedralRegionIterator<dimension> &rhs) {
return lhs.stillLooping;
}
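// Typical use: `for (auto p : PermutohedralRegion<3>(x))` visits x itself and
// then its 12 lattice neighbours (3 * 3 + 3 + 1 = 13 offsets in total).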
// Call for each convolutional layer, once for each batch item.
// rules is used to perform the "lowering" while carrying out the convolution.
template <Int dimension>
double
PermutohedralSubmanifoldConvolution_SgToRules(SparseGrid<dimension> &grid,
RuleBook &rules) {
double countActiveInputs = 0;
for (auto const &outputIter : grid.mp) {
auto inRegion = PermutohedralRegion<dimension>(outputIter.first);
Int rulesOffset = 0;
for (auto inputPoint : inRegion) {
auto inputIter = grid.mp.find(inputPoint);
if (inputIter != grid.mp.end()) {
rules[rulesOffset].push_back(inputIter->second + grid.ctr);
rules[rulesOffset].push_back(outputIter.second + grid.ctr);
countActiveInputs++;
}
rulesOffset++;
}
}
return countActiveInputs;
}
template <Int dimension>
Int PermutohedralSubmanifoldConvolution_SgsToRules(SparseGrids<dimension> &SGs,
RuleBook &rules) {
Int sd = permutohedralOffsets[dimension].size();
Int countActiveInputs = 0;
rules.clear();
rules.resize(sd);
for (Int i = 0; i < (Int)SGs.size(); i++)
countActiveInputs +=
PermutohedralSubmanifoldConvolution_SgToRules<dimension>(SGs[i], rules);
return countActiveInputs;
}
template <Int dimension>
Int PermutohedralSubmanifoldConvolution_SgsToRules_OMP(
SparseGrids<dimension> &SGs, RuleBook &rules) {
std::vector<RuleBook> rbs(SGs.size());
std::vector<double> countActiveInputs(SGs.size());
rules.clear();
Int sd = permutohedralOffsets[dimension].size();
rules.resize(sd);
{
Int i;
#pragma omp parallel for private(i)
for (i = 0; i < (Int)SGs.size(); i++) {
rbs[i].resize(sd);
countActiveInputs[i] =
PermutohedralSubmanifoldConvolution_SgToRules<dimension>(SGs[i],
rbs[i]);
}
}
{
Int i;
#pragma omp parallel for private(i)
for (i = 0; i < sd; i++)
for (auto const &rb : rbs)
rules[i].insert(rules[i].end(), rb[i].begin(), rb[i].end());
}
Int countActiveInputs_ = 0;
for (auto &i : countActiveInputs)
countActiveInputs_ += i;
return countActiveInputs_;
}
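// The OMP variant builds one rulebook per sample in parallel, then
// concatenates them offset-by-offset to match the serial result.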
#endif /* PERMUTOHEDRALSUBMANIFOLDCONVOLUTIONRULES_H */
...@@ -4,13 +4,13 @@
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
#ifndef SUBMANIFOLDCONVOLUTIONRULES_H
#define SUBMANIFOLDCONVOLUTIONRULES_H
// Full input region for an output point
template <Int dimension>
RectangularRegion<dimension>
InputRegionCalculator_Submanifold(const Point<dimension> &output, long *size) {
Point<dimension> lb, ub;
for (Int i = 0; i < dimension; i++) {
Int pad = size[i] / 2;
...@@ -29,7 +29,7 @@ double SubmanifoldConvolution_SgToRules(SparseGrid<dimension> &grid,
double countActiveInputs = 0;
for (auto const &outputIter : grid.mp) {
auto inRegion =
InputRegionCalculator_Submanifold<dimension>(outputIter.first, size);
Int rulesOffset = 0;
for (auto inputPoint : inRegion) {
auto inputIter = grid.mp.find(inputPoint);
...@@ -86,4 +86,4 @@ Int SubmanifoldConvolution_SgsToRules_OMP(SparseGrids<dimension> &SGs,
return countActiveInputs_;
}
#endif /* SUBMANIFOLDCONVOLUTIONRULES_H */
...@@ -27,8 +27,8 @@ template <Int Dimension> void dimension(py::module &m, const char *name) {
.def("addSampleFromThresholdedTensor",
&Metadata<Dimension>::addSampleFromThresholdedTensor)
.def("generateRuleBooks3s2", &Metadata<Dimension>::generateRuleBooks3s2)
.def("generateRuleBooks2s2", &Metadata<Dimension>::generateRuleBooks2s2)
.def("compareSparseHelper", &Metadata<Dimension>::compareSparseHelper);
m.def("ActivePooling_updateOutput",
(void (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
bool)) &
...@@ -140,6 +140,16 @@ template <Int Dimension> void dimension(py::module &m, const char *name) {
at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor)) &
SubmanifoldConvolution_backward,
"");
m.def("PermutohedralSubmanifoldConvolution_updateOutput",
(double (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
at::Tensor, at::Tensor)) &
PermutohedralSubmanifoldConvolution_updateOutput,
"");
m.def("PermutohedralSubmanifoldConvolution_backward",
(void (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
at::Tensor, at::Tensor, at::Tensor, at::Tensor)) &
PermutohedralSubmanifoldConvolution_backward,
"");
m.def("InputLayer_updateOutput", m.def("InputLayer_updateOutput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor, (void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, long, long)) & at::Tensor, long, long)) &
......
...@@ -104,6 +104,15 @@ void SubmanifoldConvolution_backward(
at::Tensor d_output_features, at::Tensor weight, at::Tensor d_weight,
at::Tensor d_bias);
template <Int Dimension>
double PermutohedralSubmanifoldConvolution_updateOutput(
at::Tensor inputSize, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias);
template <Int Dimension>
void PermutohedralSubmanifoldConvolution_backward(
at::Tensor inputSize, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor d_output_features,
at::Tensor weight, at::Tensor d_weight, at::Tensor d_bias);
template <Int Dimension>
double FullConvolution_updateOutput(
    at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
    at::Tensor filterStride, Metadata<Dimension> &mIn,
...
...@@ -206,6 +206,22 @@ void SubmanifoldConvolution_backward(
      d_output_features, weight, d_weight, d_bias);
}
template <Int Dimension>
double PermutohedralSubmanifoldConvolution_updateOutput(
at::Tensor inputSize, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias) {
return cpu_PermutohedralSubmanifoldConvolution_updateOutput<float, Dimension>(
inputSize, m, input_features, output_features, weight, bias);
}
template <Int Dimension>
void PermutohedralSubmanifoldConvolution_backward(
at::Tensor inputSize, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor d_output_features,
at::Tensor weight, at::Tensor d_weight, at::Tensor d_bias) {
cpu_PermutohedralSubmanifoldConvolution_backward<float, Dimension>(
inputSize, m, input_features, d_input_features, d_output_features, weight,
d_weight, d_bias);
}
template <Int Dimension>
double FullConvolution_updateOutput(
    at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
    at::Tensor filterStride, Metadata<Dimension> &mIn,
...@@ -439,6 +455,15 @@ void UnPooling_updateGradInput(at::Tensor inputSize, at::Tensor outputSize,
      at::Tensor input_features, at::Tensor d_input_features, \
      at::Tensor d_output_features, at::Tensor weight, at::Tensor d_weight, \
      at::Tensor d_bias); \
template double PermutohedralSubmanifoldConvolution_updateOutput<DIMENSION>( \
at::Tensor inputSize, Metadata<DIMENSION> & m, \
at::Tensor input_features, at::Tensor output_features, \
at::Tensor weight, at::Tensor bias); \
template void PermutohedralSubmanifoldConvolution_backward<DIMENSION>( \
at::Tensor inputSize, Metadata<DIMENSION> & m, \
at::Tensor input_features, at::Tensor d_input_features, \
at::Tensor d_output_features, at::Tensor weight, at::Tensor d_weight, \
at::Tensor d_bias); \
  template double FullConvolution_updateOutput<DIMENSION>( \
      at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize, \
      at::Tensor filterStride, Metadata<DIMENSION> & mIn, \
...
...@@ -310,6 +310,33 @@ void SubmanifoldConvolution_backward(
      d_output_features, weight, d_weight, d_bias);
}
template <Int Dimension>
double PermutohedralSubmanifoldConvolution_updateOutput(
at::Tensor inputSize, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias) {
if (input_features.type().is_cuda())
return cuda_PermutohedralSubmanifoldConvolution_updateOutput<float,
Dimension>(
inputSize, m, input_features, output_features, weight, bias);
else
return cpu_PermutohedralSubmanifoldConvolution_updateOutput<float,
Dimension>(
inputSize, m, input_features, output_features, weight, bias);
}
template <Int Dimension>
void PermutohedralSubmanifoldConvolution_backward(
at::Tensor inputSize, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor d_output_features,
at::Tensor weight, at::Tensor d_weight, at::Tensor d_bias) {
if (d_output_features.type().is_cuda())
cuda_PermutohedralSubmanifoldConvolution_backward<float, Dimension>(
inputSize, m, input_features, d_input_features, d_output_features,
weight, d_weight, d_bias);
else
cpu_PermutohedralSubmanifoldConvolution_backward<float, Dimension>(
inputSize, m, input_features, d_input_features, d_output_features,
weight, d_weight, d_bias);
}
template <Int Dimension>
double FullConvolution_updateOutput(
    at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
    at::Tensor filterStride, Metadata<Dimension> &mIn,
...@@ -645,6 +672,15 @@ void UnPooling_updateGradInput(at::Tensor inputSize, at::Tensor outputSize,
      at::Tensor input_features, at::Tensor d_input_features, \
      at::Tensor d_output_features, at::Tensor weight, at::Tensor d_weight, \
      at::Tensor d_bias); \
template double PermutohedralSubmanifoldConvolution_updateOutput<DIMENSION>( \
at::Tensor inputSize, Metadata<DIMENSION> & m, \
at::Tensor input_features, at::Tensor output_features, \
at::Tensor weight, at::Tensor bias); \
template void PermutohedralSubmanifoldConvolution_backward<DIMENSION>( \
at::Tensor inputSize, Metadata<DIMENSION> & m, \
at::Tensor input_features, at::Tensor d_input_features, \
at::Tensor d_output_features, at::Tensor weight, at::Tensor d_weight, \
at::Tensor d_bias); \
  template double FullConvolution_updateOutput<DIMENSION>( \
      at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize, \
      at::Tensor filterStride, Metadata<DIMENSION> & mIn, \
...
...@@ -22,6 +22,7 @@ from .maxPooling import MaxPooling
from .metadata import Metadata
from .networkArchitectures import *
from .networkInNetwork import NetworkInNetwork
from .permutohedralSubmanifoldConvolution import PermutohedralSubmanifoldConvolution, permutohedral_basis
from .randomizedStrideConvolution import RandomizedStrideConvolution
from .randomizedStrideMaxPooling import RandomizedStrideMaxPooling
from .sequential import Sequential
...@@ -32,4 +33,4 @@ from .spectral_norm import spectral_norm
from .submanifoldConvolution import SubmanifoldConvolution, ValidConvolution
from .tables import *
from .unPooling import UnPooling
from .utils import append_tensors, AddCoords, add_feature_planes, concatenate_feature_planes, compare_sparse
...@@ -16,7 +16,7 @@ from .batchNormalization import BatchNormalization
class Sigmoid(Module):
    def forward(self, input):
        output = SparseConvNetTensor()
        output.features = torch.sigmoid(input.features)
        output.metadata = input.metadata
        output.spatial_size = input.spatial_size
        return output
...@@ -37,7 +37,7 @@ class LeakyReLU(Module):
class Tanh(Module):
    def forward(self, input):
        output = SparseConvNetTensor()
        output.features = torch.tanh(input.features)
        output.metadata = input.metadata
        output.spatial_size = input.spatial_size
        return output
...
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sparseconvnet
import sparseconvnet_SCN
from torch.autograd import Function
from torch.nn import Module, Parameter
from .utils import *
from .sparseConvNetTensor import SparseConvNetTensor
def permutohedral_basis(dimension):
    """
    Calculate two matrices: a, a_inverse.
    Use torch.mm(coordinates, a_inverse) to map coordinates into the
    permutohedral lattice basis before the input goes to SparseConvNet.
    """
    a = torch.zeros(dimension, dimension)
    for i in range(dimension):
        for j in range(i):
            dp = (a[i, :] * a[j, :]).sum()
            a[i, j] = (0.5 - dp) / a[j, j]
        dp = (a[i, :] * a[i, :]).sum()
        a[i, i] = (1 - dp)**0.5
    ai = torch.inverse(a)
    return a, ai
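# Usage sketch (the rounding step below is an assumption, not part of this
# module): map integer grid coordinates into the lattice basis first, e.g.
#   a, ai = permutohedral_basis(3)
#   locations = torch.mm(coordinates.float(), ai).round().long()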
class PermutohedralSubmanifoldConvolution(Module):
    def __init__(self, dimension, nIn, nOut, bias):
        Module.__init__(self)
        self.dimension = dimension
        self.nIn = nIn
        self.nOut = nOut
        self.filter_volume = dimension**2 + dimension + 1
        std = (2.0 / nIn / self.filter_volume)**0.5
        self.weight = Parameter(torch.Tensor(
            self.filter_volume, nIn, nOut
        ).normal_(0, std))
        if bias:
            self.bias = Parameter(torch.Tensor(nOut).zero_())

    def forward(self, input):
        assert input.features.nelement() == 0 or input.features.size(1) == self.nIn
        output = SparseConvNetTensor()
        output.metadata = input.metadata
        output.spatial_size = input.spatial_size
        output.features = PermutohedralSubmanifoldConvolutionFunction.apply(
            input.features,
            self.weight,
            optionalTensor(self, 'bias'),
            input.metadata,
            input.spatial_size,
            self.dimension)
        return output

    def __repr__(self):
        s = 'PermutohedralSubmanifoldConvolution'
        return s

    def input_spatial_size(self, out_size):
        return out_size


class PermutohedralSubmanifoldConvolutionFunction(Function):
    @staticmethod
    def forward(
            ctx,
            input_features,
            weight,
            bias,
            input_metadata,
            spatial_size,
            dimension):
        ctx.input_metadata = input_metadata
        ctx.dimension = dimension
        output_features = input_features.new()
        ctx.save_for_backward(
            input_features,
            spatial_size,
            weight,
            bias)
        sparseconvnet.forward_pass_multiplyAdd_count +=\
            sparseconvnet_SCN.PermutohedralSubmanifoldConvolution_updateOutput(
                spatial_size,
                input_metadata,
                input_features,
                output_features,
                weight,
                bias)
        sparseconvnet.forward_pass_hidden_states += output_features.nelement()
        return output_features

    @staticmethod
    def backward(ctx, grad_output):
        input_features, spatial_size, weight, bias = ctx.saved_tensors
        grad_input = grad_output.new()
        grad_weight = torch.zeros_like(weight)
        grad_bias = torch.zeros_like(bias)
        sparseconvnet_SCN.PermutohedralSubmanifoldConvolution_backward(
            spatial_size,
            ctx.input_metadata,
            input_features,
            grad_input,
            grad_output.contiguous(),
            weight,
            grad_weight,
            grad_bias)
        return grad_input, grad_weight, optionalTensorReturn(grad_bias), None, None, None
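# Example (sketch; the layer sizes are hypothetical):
#   conv = PermutohedralSubmanifoldConvolution(dimension=3, nIn=16, nOut=32, bias=True)
#   y = conv(x)  # x: a SparseConvNetTensor with coordinates in the lattice basis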
...@@ -4,9 +4,9 @@
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch


class Sequential(torch.nn.Sequential):
    def input_spatial_size(self, out_size):
        for m in reversed(self._modules):
            out_size = self._modules[m].input_spatial_size(out_size)
...@@ -15,3 +15,19 @@ class Sequential(S):
    def add(self, module):
        self._modules[str(len(self._modules))] = module
        return self
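    # reweight: push one batch through the network; for every module exposing
    # both weight and bias, rescale them so that the module's outputs on that
    # batch have zero mean and unit standard deviation per feature plane.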
    def reweight(self, input):
        for module in self._modules.values():
            if isinstance(module, Sequential):
                input = module.reweight(input)
            elif hasattr(input, 'features') and hasattr(module, 'weight') and hasattr(module, 'bias'):
                f = module(input).features
                f = f - module.bias
                s = f.std(0)
                f = f / s
                module.weight = torch.nn.Parameter(module.weight / s)
                module.bias = torch.nn.Parameter(-f.mean(0))
                input = module(input)
            else:
                input = module(input)
        return input
...@@ -43,10 +43,13 @@ class SparseConvNetTensor(object):
        self.spatialSize = None

    def __repr__(self):
        sl = self.get_spatial_locations() if self.metadata else None
        return 'SparseConvNetTensor<<' + \
            'features=' + repr(self.features) + \
            ',features.shape=' + repr(self.features.shape) + \
            ',batch_locations=' + repr(sl) + \
            ',batch_locations.shape=' + repr(sl.shape if self.metadata else None) + \
            ',spatial size=' + repr(self.spatial_size) + \
            '>>'

    def to_variable(self, requires_grad=False, volatile=False):
...
...@@ -90,3 +90,19 @@ class AddCoords(torch.nn.Module):
        output.metadata = input.metadata
        output.spatial_size = input.spatial_size
        return output
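# compare_sparse: mean squared discrepancy between two SparseConvNetTensors;
# sites active in only one of the two inputs contribute their full squared
# feature norm to the error.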
def compare_sparse(x, y):
    cL, cR, L, R = x.metadata.compareSparseHelper(y.metadata, x.spatial_size)
    if x.features.is_cuda:
        cL = cL.cuda()
        cR = cR.cuda()
        L = L.cuda()
        R = R.cuda()
    e = 0
    if cL.numel():
        e += (x.features[cL] - y.features[cR]).pow(2).sum()
    if L.numel():
        e += x.features[L].pow(2).sum()
    if R.numel():
        e += y.features[R].pow(2).sum()
    return e / (cL.numel() + L.numel() + R.numel())