Unverified Commit 2082f213 authored by Ben Graham's avatar Ben Graham Committed by GitHub
Browse files

Merge pull request #118 from facebookresearch/references

Use references where possible
parents 1171aae3 d8c8a060
......@@ -16,10 +16,10 @@ void InputLayer_bp(T *d_input_features, T *d_output_features, Int nRows,
template <typename T, Int Dimension>
void cuda_InputLayer_updateOutput(Metadata<Dimension> &m,
/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor input_coords,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features,
/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &input_coords,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features,
long batchSize, long mode) {
m.inputLayer(spatialSize, input_coords, batchSize, mode);
......@@ -44,8 +44,8 @@ void cuda_InputLayer_updateOutput(Metadata<Dimension> &m,
template <typename T, Int Dimension>
void cuda_InputLayer_updateGradInput(
Metadata<Dimension> &m,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features) {
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features) {
auto &rules = m.inputLayerRuleBook;
Int nPlanes = d_output_features.size(1);
......@@ -69,8 +69,8 @@ void cuda_InputLayer_updateGradInput(
template <typename T, Int Dimension>
void cuda_OutputLayer_updateOutput(Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features) {
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features) {
auto &rules = m.inputLayerRuleBook;
Int nPlanes = input_features.size(1);
......@@ -93,8 +93,8 @@ void cuda_OutputLayer_updateOutput(Metadata<Dimension> &m,
template <typename T, Int Dimension>
void cuda_OutputLayer_updateGradInput(
Metadata<Dimension> &m,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features) {
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features) {
auto &rules = m.inputLayerRuleBook;
Int nPlanes = d_output_features.size(1);
......@@ -118,10 +118,10 @@ void cuda_OutputLayer_updateGradInput(
template <typename T, Int Dimension>
void cuda_BLInputLayer_updateOutput(Metadata<Dimension> &m,
/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor input_coords,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features,
/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &input_coords,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features,
long mode) {
m.blLayer(spatialSize, input_coords, mode);
......@@ -148,8 +148,8 @@ void cuda_BLInputLayer_updateOutput(Metadata<Dimension> &m,
template <typename T, Int Dimension>
void cuda_BLInputLayer_updateGradInput(
Metadata<Dimension> &m,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features) {
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features) {
auto &rules = m.blLayerRuleBook;
Int nPlanes = d_output_features.size(1);
......@@ -176,8 +176,8 @@ void cuda_BLInputLayer_updateGradInput(
template <typename T, Int Dimension>
void cuda_BLOutputLayer_updateOutput(
Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features) {
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features) {
auto &rules = m.blLayerRuleBook;
Int nPlanes = input_features.size(1);
......@@ -201,8 +201,8 @@ void cuda_BLOutputLayer_updateOutput(
template <typename T, Int Dimension>
void cuda_BLOutputLayer_updateGradInput(
Metadata<Dimension> &m,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features) {
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features) {
auto &rules = m.blLayerRuleBook;
Int nPlanes = d_output_features.size(2);
......
......@@ -11,8 +11,8 @@ void LeakyReLU_bp(T *input_features, T *d_input_features, T *output_features,
Int n, T alpha);
template <typename T>
void cuda_LeakyReLU_updateOutput(/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features,
void cuda_LeakyReLU_updateOutput(/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features,
T alpha) {
output_features.resize_as_(input_features);
auto n = input_features.numel();
......@@ -22,9 +22,9 @@ void cuda_LeakyReLU_updateOutput(/*cuda float*/ at::Tensor input_features,
template <typename T>
void cuda_LeakyReLU_updateGradInput(
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features, T alpha) {
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features, T alpha) {
d_input_features.resize_as_(d_output_features);
auto n = d_input_features.numel();
LeakyReLU_bp<T>(input_features.data<T>(), d_input_features.data<T>(),
......
......@@ -16,14 +16,14 @@ void cuda_MaxPooling_BackwardPass(T *input_features, T *d_input_features,
template <typename T, Int Dimension>
void cuda_MaxPooling_updateOutput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
/*long*/ at::Tensor poolSize,
/*long*/ at::Tensor poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features, long nFeaturesToDrop) {
/*long*/ at::Tensor &inputSize, /*long*/ at::Tensor &outputSize,
/*long*/ at::Tensor &poolSize,
/*long*/ at::Tensor &poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features, long nFeaturesToDrop) {
Int nPlanes = input_features.size(1) - nFeaturesToDrop;
auto _rules =
const auto &_rules =
m.getRuleBook(inputSize, outputSize, poolSize, poolStride, true);
Int nActive = m.getNActive(outputSize);
output_features.resize_({nActive, nPlanes});
......@@ -36,16 +36,16 @@ void cuda_MaxPooling_updateOutput(
}
template <typename T, Int Dimension>
void cuda_MaxPooling_updateGradInput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
/*long*/ at::Tensor poolSize,
/*long*/ at::Tensor poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor output_features,
/*cuda float*/ at::Tensor d_output_features, long nFeaturesToDrop) {
/*long*/ at::Tensor &inputSize, /*long*/ at::Tensor &outputSize,
/*long*/ at::Tensor &poolSize,
/*long*/ at::Tensor &poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &output_features,
/*cuda float*/ at::Tensor &d_output_features, long nFeaturesToDrop) {
Int nPlanes = input_features.size(1) - nFeaturesToDrop;
auto _rules =
const auto &_rules =
m.getRuleBook(inputSize, outputSize, poolSize, poolStride, true);
d_input_features.resize_as_(input_features);
d_input_features.zero_();
......@@ -60,14 +60,14 @@ void cuda_MaxPooling_updateGradInput(
}
template <typename T, Int Dimension>
void cuda_RandomizedStrideMaxPooling_updateOutput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
/*long*/ at::Tensor poolSize,
/*long*/ at::Tensor poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features, long nFeaturesToDrop) {
/*long*/ at::Tensor &inputSize, /*long*/ at::Tensor &outputSize,
/*long*/ at::Tensor &poolSize,
/*long*/ at::Tensor &poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features, long nFeaturesToDrop) {
Int nPlanes = input_features.size(1) - nFeaturesToDrop;
auto _rules = m.getRandomizedStrideRuleBook(inputSize, outputSize, poolSize,
const auto &_rules = m.getRandomizedStrideRuleBook(inputSize, outputSize, poolSize,
poolStride, true);
Int nActive = m.getNActive(outputSize);
output_features.resize_({nActive, nPlanes});
......@@ -80,16 +80,16 @@ void cuda_RandomizedStrideMaxPooling_updateOutput(
}
template <typename T, Int Dimension>
void cuda_RandomizedStrideMaxPooling_updateGradInput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
/*long*/ at::Tensor poolSize,
/*long*/ at::Tensor poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor output_features,
/*cuda float*/ at::Tensor d_output_features, long nFeaturesToDrop) {
/*long*/ at::Tensor &inputSize, /*long*/ at::Tensor &outputSize,
/*long*/ at::Tensor &poolSize,
/*long*/ at::Tensor &poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &output_features,
/*cuda float*/ at::Tensor &d_output_features, long nFeaturesToDrop) {
Int nPlanes = input_features.size(1) - nFeaturesToDrop;
auto _rules = m.getRandomizedStrideRuleBook(inputSize, outputSize, poolSize,
const auto &_rules = m.getRandomizedStrideRuleBook(inputSize, outputSize, poolSize,
poolStride, true);
d_input_features.resize_as_(input_features);
d_input_features.zero_();
......
......@@ -15,9 +15,9 @@ void cuda_SparseToDense_BackwardPass(T *d_input_features, T *d_output_features,
template <typename T, Int Dimension>
void cuda_SparseToDense_updateOutput(
/*long*/ at::Tensor inputSize, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features, long nPlanes) {
/*long*/ at::Tensor &inputSize, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features, long nPlanes) {
{
std::array<long, Dimension + 2> sz;
......@@ -30,7 +30,7 @@ void cuda_SparseToDense_updateOutput(
output_features.zero_();
}
if (input_features.ndimension() == 2) {
auto _rules = m.getSparseToDenseRuleBook(inputSize, true);
const auto &_rules = m.getSparseToDenseRuleBook(inputSize, true);
Int _nPlanes = input_features.size(1);
auto iF = input_features.data<T>();
auto oF = output_features.data<T>();
......@@ -40,16 +40,16 @@ void cuda_SparseToDense_updateOutput(
}
template <typename T, Int Dimension>
void cuda_SparseToDense_updateGradInput(
/*long*/ at::Tensor inputSize, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features) {
/*long*/ at::Tensor &inputSize, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features) {
d_input_features.resize_as_(input_features);
d_input_features.zero_();
if (input_features.ndimension() == 2) {
auto _rules = m.getSparseToDenseRuleBook(inputSize, true);
const auto &_rules = m.getSparseToDenseRuleBook(inputSize, true);
long spatialVolume = inputSize.prod().data<long>()[0];
Int _nPlanes = d_input_features.size(1);
auto diF = d_input_features.data<T>();
......
......@@ -15,14 +15,14 @@ void cuda_UnPooling_BackwardPass(T *d_input_features, T *d_output_features,
template <typename T, Int Dimension>
void cuda_UnPooling_updateOutput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
/*long*/ at::Tensor poolSize,
/*long*/ at::Tensor poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features, long nFeaturesToDrop) {
/*long*/ at::Tensor &inputSize, /*long*/ at::Tensor &outputSize,
/*long*/ at::Tensor &poolSize,
/*long*/ at::Tensor &poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features, long nFeaturesToDrop) {
Int nPlanes = input_features.size(1) - nFeaturesToDrop;
auto _rules =
const auto &_rules =
m.getRuleBook(outputSize, inputSize, poolSize, poolStride, true);
Int nActive = m.getNActive(outputSize);
output_features.resize_({nActive, input_features.size(1) - nFeaturesToDrop});
......@@ -37,14 +37,14 @@ void cuda_UnPooling_updateOutput(
template <typename T, Int Dimension>
void cuda_UnPooling_updateGradInput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
/*long*/ at::Tensor poolSize,
/*long*/ at::Tensor poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features, long nFeaturesToDrop) {
/*long*/ at::Tensor &inputSize, /*long*/ at::Tensor &outputSize,
/*long*/ at::Tensor &poolSize,
/*long*/ at::Tensor &poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features, long nFeaturesToDrop) {
Int nPlanes = d_input_features.size(1) - nFeaturesToDrop;
auto _rules =
const auto &_rules =
m.getRuleBook(outputSize, inputSize, poolSize, poolStride, true);
auto diF = d_input_features.data<T>() + nFeaturesToDrop;
......
......@@ -15,7 +15,6 @@ void Convolution_InputSgToRulesAndOutputSg(SparseGrid<dimension> &inputGrid,
long *stride, long *inputSpatialSize,
long *outputSpatialSize) {
rules.resize(volume<dimension>(size));
for (auto const &inIter : inputGrid.mp) {
auto outRegion = OutputRegionCalculator<dimension>(
inIter.first, size, stride, outputSpatialSize);
......
......@@ -22,7 +22,7 @@ template <Int dimension> SparseGrid<dimension>::SparseGrid() : ctr(0) {
mp.set_empty_key(empty_key);
}
template <typename T> T *OptionalTensorData(at::Tensor tensor) {
template <typename T> T *OptionalTensorData(at::Tensor &tensor) {
return tensor.numel() ? tensor.data<T>() : nullptr;
}
......@@ -30,9 +30,9 @@ template <Int dimension>
void addPointToSparseGridMapAndFeatures(SparseGridMap<dimension> &mp,
Point<dimension> p, Int &nActive,
long nPlanes,
/*float*/ at::Tensor features,
/*float*/ at::Tensor &features,
float *vec, bool overwrite) {
auto mapVal = mp.insert(std::make_pair(p, nActive));
if (mapVal.second) {
nActive++;
......@@ -65,16 +65,17 @@ template <Int dimension> void Metadata<dimension>::clear() {
blLayerRuleBook.clear();
}
template <Int dimension>
Int Metadata<dimension>::getNActive(/*long*/ at::Tensor spatialSize) {
Int Metadata<dimension>::getNActive(/*long*/ at::Tensor &spatialSize) {
return nActive[LongTensorToPoint<dimension>(spatialSize)];
};
template <Int dimension>
SparseGrids<dimension> &
Metadata<dimension>::getSparseGrid(/*long*/ at::Tensor spatialSize) {
Metadata<dimension>::getSparseGrid(/*long*/ at::Tensor &spatialSize) {
return grids[LongTensorToPoint<dimension>(spatialSize)];
};
template <Int dimension>
void Metadata<dimension>::setInputSpatialSize(/*long*/ at::Tensor spatialSize) {
void Metadata<dimension>::setInputSpatialSize(
/*long*/ at::Tensor &spatialSize) {
inputSpatialSize = LongTensorToPoint<dimension>(spatialSize);
inputSGs = &grids[inputSpatialSize];
inputNActive = &nActive[inputSpatialSize];
......@@ -85,10 +86,10 @@ template <Int dimension> void Metadata<dimension>::batchAddSample() {
inputSG = &inputSGs->back();
}
template <Int dimension>
void Metadata<dimension>::setInputSpatialLocation(/*float*/ at::Tensor features,
/*long*/ at::Tensor location,
/*float*/ at::Tensor vec,
bool overwrite) {
void Metadata<dimension>::setInputSpatialLocation(
/*float*/ at::Tensor &features,
/*long*/ at::Tensor &location,
/*float*/ at::Tensor &vec, bool overwrite) {
auto p = LongTensorToPoint<dimension>(location);
SparseGridMap<dimension> &mp = inputSG->mp;
Int &nActive = *inputNActive;
......@@ -98,9 +99,9 @@ void Metadata<dimension>::setInputSpatialLocation(/*float*/ at::Tensor features,
}
template <Int dimension>
void Metadata<dimension>::setInputSpatialLocations(
/*float*/ at::Tensor features,
/*long*/ at::Tensor locations,
/*float*/ at::Tensor vecs, bool overwrite) {
/*float*/ at::Tensor &features,
/*long*/ at::Tensor &locations,
/*float*/ at::Tensor &vecs, bool overwrite) {
/* assert(locations.ndimension() == 2 and "locations must be 2
* dimensional!"); */
/* assert(vecs.ndimension() == 2 and "vecs must be 2 dimensional!"); */
......@@ -147,7 +148,7 @@ void Metadata<dimension>::setInputSpatialLocations(
template <Int dimension>
at::Tensor
Metadata<dimension>::getSpatialLocations(/*long*/ at::Tensor spatialSize) {
Metadata<dimension>::getSpatialLocations(/*long*/ at::Tensor &spatialSize) {
Int nActive = getNActive(spatialSize);
auto &SGs = getSparseGrid(spatialSize);
Int batchSize = SGs.size();
......@@ -169,8 +170,8 @@ Metadata<dimension>::getSpatialLocations(/*long*/ at::Tensor spatialSize) {
}
template <Int dimension>
void Metadata<dimension>::createMetadataForDenseToSparse(
/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor nz_, long batchSize) {
/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &nz_, long batchSize) {
clear();
setInputSpatialSize(spatialSize);
inputSGs->resize(batchSize);
......@@ -208,9 +209,9 @@ void Metadata<dimension>::createMetadataForDenseToSparse(
template <Int dimension>
void Metadata<dimension>::sparsifyMetadata(Metadata<dimension> &mOut,
/*long*/ at::Tensor spatialSize,
/*byte*/ at::Tensor filter,
/*long*/ at::Tensor cuSum) {
/*long*/ at::Tensor &spatialSize,
/*byte*/ at::Tensor &filter,
/*long*/ at::Tensor &cuSum) {
// Create a new SparseGrids with fewer entries.
mOut.clear();
auto p = LongTensorToPoint<dimension>(spatialSize);
......@@ -240,7 +241,7 @@ void Metadata<dimension>::sparsifyMetadata(Metadata<dimension> &mOut,
template <Int dimension>
void Metadata<dimension>::appendMetadata(Metadata<dimension> &mAdd,
/*long*/ at::Tensor spatialSize) {
/*long*/ at::Tensor &spatialSize) {
auto p = LongTensorToPoint<dimension>(spatialSize);
auto &sgs1 = grids[p];
auto &sgs2 = mAdd.grids[p];
......@@ -257,7 +258,7 @@ void Metadata<dimension>::appendMetadata(Metadata<dimension> &mAdd,
template <Int dimension>
std::vector<at::Tensor>
Metadata<dimension>::sparsifyCompare(Metadata<dimension> &mGT,
/*long*/ at::Tensor spatialSize) {
/*long*/ at::Tensor &spatialSize) {
auto p = LongTensorToPoint<dimension>(spatialSize);
at::Tensor gt = torch::zeros({nActive[p]}, at::kByte);
at::Tensor ref_map = torch::empty({mGT.nActive[p]}, at::kLong);
......@@ -288,10 +289,10 @@ Metadata<dimension>::sparsifyCompare(Metadata<dimension> &mGT,
// size[dimension] == #feature planes
template <Int dimension>
void Metadata<dimension>::addSampleFromThresholdedTensor(
/*float*/ at::Tensor features_,
/*float*/ at::Tensor tensor_,
/*long*/ at::Tensor offset_,
/*long*/ at::Tensor spatialSize_, float threshold) {
/*float*/ at::Tensor &features_,
/*float*/ at::Tensor &tensor_,
/*long*/ at::Tensor &offset_,
/*long*/ at::Tensor &spatialSize_, float threshold) {
auto &nActive = *inputNActive;
auto &SGs = *inputSGs;
......@@ -404,8 +405,8 @@ template <Int dimension> void Metadata<dimension>::generateRuleBooks2s2() {
}
template <Int dimension>
void Metadata<dimension>::inputLayer(/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor coords, Int batchSize,
void Metadata<dimension>::inputLayer(/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &coords, Int batchSize,
Int mode) {
assert(spatialSize.ndimension() == 1);
assert(spatialSize.size(0) == dimension);
......@@ -417,8 +418,8 @@ void Metadata<dimension>::inputLayer(/*long*/ at::Tensor spatialSize,
*inputNActive);
}
template <Int dimension>
void Metadata<dimension>::blLayer(/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor coords, Int mode) {
void Metadata<dimension>::blLayer(/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &coords, Int mode) {
assert(spatialSize.ndimension() == 1);
assert(spatialSize.size(0) == dimension);
assert(coords.ndimension() == 3);
......@@ -429,8 +430,8 @@ void Metadata<dimension>::blLayer(/*long*/ at::Tensor spatialSize,
}
template <Int dimension>
RuleBook &Metadata<dimension>::getSubmanifoldRuleBook(
/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor size, bool openMP) {
/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &size, bool openMP) {
auto p = TwoLongTensorsToPoint<dimension>(spatialSize, size);
auto &rb = submanifoldRuleBooks[p];
if (rb.empty()) {
......@@ -444,7 +445,7 @@ RuleBook &Metadata<dimension>::getSubmanifoldRuleBook(
}
template <Int dimension>
RuleBook &Metadata<dimension>::getPermutohedralSubmanifoldRuleBook(
/*long*/ at::Tensor spatialSize, bool openMP) {
/*long*/ at::Tensor &spatialSize, bool openMP) {
auto p = LongTensorToPoint<dimension>(spatialSize);
auto &rb = permutohedralRuleBooks[p];
if (rb.empty()) {
......@@ -458,7 +459,7 @@ RuleBook &Metadata<dimension>::getPermutohedralSubmanifoldRuleBook(
}
template <Int dimension>
RuleBook &Metadata<dimension>::getActivePoolingRuleBook(
/*long*/ at::Tensor spatialSize) {
/*long*/ at::Tensor &spatialSize) {
auto spatialSz = LongTensorToPoint<dimension>(spatialSize);
auto &SGs = grids[spatialSz];
auto &rb = activePoolingRuleBooks[spatialSz];
......@@ -468,7 +469,7 @@ RuleBook &Metadata<dimension>::getActivePoolingRuleBook(
}
template <Int dimension>
RuleBook &Metadata<dimension>::getSparseToDenseRuleBook(
/*long*/ at::Tensor spatialSize, bool openMP) {
/*long*/ at::Tensor &spatialSize, bool openMP) {
auto ss = LongTensorToPoint<dimension>(spatialSize);
auto &SGs = grids[ss];
auto &rb = sparseToDenseRuleBooks[ss];
......@@ -484,10 +485,10 @@ RuleBook &Metadata<dimension>::getSparseToDenseRuleBook(
}
template <Int dimension>
RuleBook &Metadata<dimension>::getRuleBook(
/*long*/ at::Tensor inputSpatialSize,
/*long*/ at::Tensor outputSpatialSize,
/*long*/ at::Tensor size,
/*long*/ at::Tensor stride, bool openMP) {
/*long*/ at::Tensor &inputSpatialSize,
/*long*/ at::Tensor &outputSpatialSize,
/*long*/ at::Tensor &size,
/*long*/ at::Tensor &stride, bool openMP) {
auto p = ThreeLongTensorsToPoint<dimension>(inputSpatialSize, size, stride);
auto &rb = ruleBooks[p];
if (rb.empty()) {
......@@ -511,10 +512,10 @@ RuleBook &Metadata<dimension>::getRuleBook(
}
template <Int dimension>
RuleBook &Metadata<dimension>::getFullConvolutionRuleBook(
/*long*/ at::Tensor inputSpatialSize,
/*long*/ at::Tensor outputSpatialSize,
/*long*/ at::Tensor size,
/*long*/ at::Tensor stride, Metadata<dimension> &newM) {
/*long*/ at::Tensor &inputSpatialSize,
/*long*/ at::Tensor &outputSpatialSize,
/*long*/ at::Tensor &size,
/*long*/ at::Tensor &stride, Metadata<dimension> &newM) {
auto &rb = newM.fullConvolutionRuleBook;
if (rb.empty()) {
newM.clear();
......@@ -533,10 +534,10 @@ RuleBook &Metadata<dimension>::getFullConvolutionRuleBook(
template <Int dimension>
RuleBook &Metadata<dimension>::getRandomizedStrideRuleBook(
/*long*/ at::Tensor inputSpatialSize,
/*long*/ at::Tensor outputSpatialSize,
/*long*/ at::Tensor size,
/*long*/ at::Tensor stride, bool openMP) {
/*long*/ at::Tensor &inputSpatialSize,
/*long*/ at::Tensor &outputSpatialSize,
/*long*/ at::Tensor &size,
/*long*/ at::Tensor &stride, bool openMP) {
auto p = ThreeLongTensorsToPoint<dimension>(inputSpatialSize, size, stride);
auto &rb = ruleBooks[p];
if (rb.empty()) {
......@@ -577,7 +578,7 @@ at::Tensor vvl2t(std::vector<std::vector<long>> v) {
template <Int dimension>
std::vector<at::Tensor>
Metadata<dimension>::compareSparseHelper(Metadata<dimension> &mR,
/* long */ at::Tensor spatialSize) {
/* long */ at::Tensor &spatialSize) {
auto p = LongTensorToPoint<dimension>(spatialSize);
auto &sgsL = grids[p];
auto &sgsR = mR.grids[p];
......@@ -624,7 +625,7 @@ at::Tensor vvl2t_(std::vector<std::vector<Int>> v) {
template <Int dimension>
at::Tensor
Metadata<dimension>::copyFeaturesHelper(Metadata<dimension> &mR,
/* long */ at::Tensor spatialSize) {
/* long */ at::Tensor &spatialSize) {
auto p = LongTensorToPoint<dimension>(spatialSize);
auto &sgsL = grids[p];
auto &sgsR = mR.grids[p];
......
......@@ -38,7 +38,7 @@ template <Int dimension>
void addPointToSparseGridMapAndFeatures(SparseGridMap<dimension> &mp,
Point<dimension> p, Int &nActive,
long nPlanes,
/*float*/ at::Tensor features,
/*float*/ at::Tensor &features,
float *vec, bool overwrite);
template <Int dimension> class Metadata {
......@@ -81,45 +81,47 @@ public:
Metadata();
void clear();
Int getNActive(/*long*/ at::Tensor spatialSize);
SparseGrids<dimension> &getSparseGrid(/*long*/ at::Tensor spatialSize);
void setInputSpatialSize(/*long*/ at::Tensor spatialSize);
Int getNActive(/*long*/ at::Tensor &spatialSize);
SparseGrids<dimension> &getSparseGrid(/*long*/ at::Tensor &spatialSize);
void setInputSpatialSize(/*long*/ at::Tensor &spatialSize);
void batchAddSample();
void setInputSpatialLocation(/*float*/ at::Tensor features,
/*long*/ at::Tensor location,
/*float*/ at::Tensor vec, bool overwrite);
void setInputSpatialLocations(/*float*/ at::Tensor features,
/*long*/ at::Tensor locations,
/*float*/ at::Tensor vecs, bool overwrite);
void setInputSpatialLocation(/*float*/ at::Tensor &features,
/*long*/ at::Tensor &location,
/*float*/ at::Tensor &vec, bool overwrite);
void setInputSpatialLocations(/*float*/ at::Tensor &features,
/*long*/ at::Tensor &locations,
/*float*/ at::Tensor &vecs, bool overwrite);
at::Tensor getSpatialLocations(/*long*/ at::Tensor spatialSize);
void createMetadataForDenseToSparse(/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor nz_, long batchSize);
at::Tensor getSpatialLocations(/*long*/ at::Tensor &spatialSize);
void createMetadataForDenseToSparse(/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &nz_, long batchSize);
void sparsifyMetadata(Metadata<dimension> &mOut,
/*long*/ at::Tensor spatialSize,
/*byte*/ at::Tensor filter,
/*long*/ at::Tensor cuSum);
/*long*/ at::Tensor &spatialSize,
/*byte*/ at::Tensor &filter,
/*long*/ at::Tensor &cuSum);
void appendMetadata(Metadata<dimension> &mAdd,
/*long*/ at::Tensor spatialSize);
/*long*/ at::Tensor &spatialSize);
/* std::vector<at::Tensor> sparsifyCompare(Metadata<dimension> &mReference, */
/* std::vector<at::Tensor &> sparsifyCompare(Metadata<dimension> &mReference,
*/
/* Metadata<dimension> &mSparsified,
*/
/* /\*long*\/ at::Tensor spatialSize);
/* /\*long*\/ at::Tensor &
* spatialSize);
*/
std::vector<at::Tensor> sparsifyCompare(Metadata<dimension> &mReference,
/*long*/ at::Tensor spatialSize);
/*long*/ at::Tensor &spatialSize);
// tensor is size[0] x .. x size[dimension-1] x size[dimension]
// size[0] x .. x size[dimension-1] == spatial volume
// size[dimension] == #feature planes
void addSampleFromThresholdedTensor(/*float*/ at::Tensor features_,
/*float*/ at::Tensor tensor_,
/*long*/ at::Tensor offset_,
/*long*/ at::Tensor spatialSize_,
void addSampleFromThresholdedTensor(/*float*/ at::Tensor &features_,
/*float*/ at::Tensor &tensor_,
/*long*/ at::Tensor &offset_,
/*long*/ at::Tensor &spatialSize_,
float threshold);
// 3x3 submanifold convolutions, 3x3/2x2 pooling or strided convolutions
......@@ -128,41 +130,42 @@ public:
// 3x3 submanifold convolutions, 2x2 pooling or strided convolutions
void generateRuleBooks2s2();
void inputLayer(/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor coords, Int batchSize, Int mode);
void blLayer(/*long*/ at::Tensor spatialSize, /*long*/ at::Tensor coords,
void inputLayer(/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &coords, Int batchSize, Int mode);
void blLayer(/*long*/ at::Tensor &spatialSize, /*long*/ at::Tensor &coords,
Int mode);
RuleBook &getSubmanifoldRuleBook(/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor size, bool openMP);
RuleBook &getPermutohedralSubmanifoldRuleBook(/*long*/ at::Tensor spatialSize,
bool openMP);
RuleBook &getActivePoolingRuleBook(/*long*/ at::Tensor spatialSize);
RuleBook &getSparseToDenseRuleBook(/*long*/ at::Tensor spatialSize,
RuleBook &getSubmanifoldRuleBook(/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &size, bool openMP);
RuleBook &
getPermutohedralSubmanifoldRuleBook(/*long*/ at::Tensor &spatialSize,
bool openMP);
RuleBook &getActivePoolingRuleBook(/*long*/ at::Tensor &spatialSize);
RuleBook &getSparseToDenseRuleBook(/*long*/ at::Tensor &spatialSize,
bool openMP);
RuleBook &getRuleBook(/*long*/ at::Tensor inputSpatialSize,
/*long*/ at::Tensor outputSpatialSize,
/*long*/ at::Tensor size,
/*long*/ at::Tensor stride, bool openMP);
RuleBook &getFullConvolutionRuleBook(/*long*/ at::Tensor inputSpatialSize,
/*long*/ at::Tensor outputSpatialSize,
/*long*/ at::Tensor size,
/*long*/ at::Tensor stride,
RuleBook &getRuleBook(/*long*/ at::Tensor &inputSpatialSize,
/*long*/ at::Tensor &outputSpatialSize,
/*long*/ at::Tensor &size,
/*long*/ at::Tensor &stride, bool openMP);
RuleBook &getFullConvolutionRuleBook(/*long*/ at::Tensor &inputSpatialSize,
/*long*/ at::Tensor &outputSpatialSize,
/*long*/ at::Tensor &size,
/*long*/ at::Tensor &stride,
Metadata<dimension> &newM);
RuleBook &getRandomizedStrideRuleBook(/*long*/ at::Tensor inputSpatialSize,
/*long*/ at::Tensor outputSpatialSize,
/*long*/ at::Tensor size,
/*long*/ at::Tensor stride,
RuleBook &getRandomizedStrideRuleBook(/*long*/ at::Tensor &inputSpatialSize,
/*long*/ at::Tensor &outputSpatialSize,
/*long*/ at::Tensor &size,
/*long*/ at::Tensor &stride,
bool openMP);
std::vector<at::Tensor>
std::vector<at::Tensor >
compareSparseHelper(Metadata<dimension> &mR,
/* long */ at::Tensor spatialSize);
/* long */ at::Tensor &spatialSize);
at::Tensor copyFeaturesHelper(Metadata<dimension> &mR,
/* long */ at::Tensor spatialSize);
/* long */ at::Tensor &spatialSize);
};
template <typename T> T *OptionalTensorData(at::Tensor tensor);
template <typename T> T *OptionalTensorData(at::Tensor &tensor);
template <Int dimension> Int volume(long *point);
#endif
......@@ -23,12 +23,12 @@
template void ActivePooling_ForwardPass<float>(float *input_features,
float *output_features,
Int batchSize, Int maxActive,
Int nPlanes, Int *rules,
Int nPlanes, const Int *rules,
bool average);
template void ActivePooling_BackwardPass<float>(float *d_input_features,
float *d_output_features,
Int batchSize, Int maxActive,
Int nPlanes, Int *rules,
Int nPlanes, const Int *rules,
bool average);
template void dAffineReluTrivialConvolution_forward<float>(
......
......@@ -7,9 +7,9 @@
// Helper function to draw pen strokes with
// nPlanes = 3, feature vector = (1,dx,dy)
void cpu_float_DrawCurve_2(Metadata<2> &m,
/*float*/ at::Tensor features,
/*float*/ at::Tensor stroke) {
at::Tensor location = at::zeros(at::CPU(at::kLong), {2});
/*float*/ at::Tensor &features,
/*float*/ at::Tensor &stroke) {
at::Tensor &location = at::zeros(at::CPU(at::kLong), {2});
auto location_ = location.data<long>();
auto vec = at::zeros(at::CPU(at::kFloat), {3});
......
......@@ -31,168 +31,168 @@ template <Int Dimension> void dimension(py::module &m, const char *name) {
.def("compareSparseHelper", &Metadata<Dimension>::compareSparseHelper)
.def("copyFeaturesHelper", &Metadata<Dimension>::copyFeaturesHelper);
m.def("ActivePooling_updateOutput",
(void (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
(void (*)(at::Tensor&, Metadata<Dimension> &, at::Tensor&, at::Tensor&,
bool)) &
ActivePooling_updateOutput,
"");
m.def("ActivePooling_updateGradInput",
(void (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
at::Tensor, bool)) &
(void (*)(at::Tensor&, Metadata<Dimension> &, at::Tensor&, at::Tensor&,
at::Tensor&, bool)) &
ActivePooling_updateGradInput,
"");
m.def("AveragePooling_updateOutput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, long)) &
AveragePooling_updateOutput,
"");
m.def("AveragePooling_updateGradInput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
long)) &
AveragePooling_updateGradInput,
"");
m.def("Convolution_updateOutput",
(double (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor)) &
(double (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&)) &
Convolution_updateOutput,
"");
m.def("Convolution_backward",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, at::Tensor, at::Tensor)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&)) &
Convolution_backward,
"");
m.def("RandomizedStrideConvolution_updateOutput",
(double (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor)) &
(double (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&)) &
RandomizedStrideConvolution_updateOutput,
"");
m.def("RandomizedStrideConvolution_backward",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, at::Tensor, at::Tensor)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&)) &
RandomizedStrideConvolution_backward,
"");
m.def("Deconvolution_updateOutput",
(double (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor)) &
(double (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&)) &
Deconvolution_updateOutput,
"");
m.def("Deconvolution_backward",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, at::Tensor, at::Tensor)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&)) &
Deconvolution_backward,
"");
m.def("FullConvolution_updateOutput",
(double (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, Metadata<Dimension> &, at::Tensor,
at::Tensor, at::Tensor, at::Tensor)) &
(double (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, Metadata<Dimension> &, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&)) &
FullConvolution_updateOutput,
"");
m.def("FullConvolution_backward",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, Metadata<Dimension> &, at::Tensor,
at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, Metadata<Dimension> &, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&)) &
FullConvolution_backward,
"");
m.def("MaxPooling_updateOutput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, long)) &
MaxPooling_updateOutput,
"");
m.def("MaxPooling_updateGradInput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, long)) &
MaxPooling_updateGradInput,
"");
m.def("RandomizedStrideMaxPooling_updateOutput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, long)) &
RandomizedStrideMaxPooling_updateOutput,
"");
m.def("RandomizedStrideMaxPooling_updateGradInput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, long)) &
RandomizedStrideMaxPooling_updateGradInput,
"");
m.def("SparseToDense_updateOutput",
(void (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
(void (*)(at::Tensor&, Metadata<Dimension> &, at::Tensor&, at::Tensor&,
long)) &
SparseToDense_updateOutput,
"");
m.def("SparseToDense_updateGradInput",
(void (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
at::Tensor)) &
(void (*)(at::Tensor&, Metadata<Dimension> &, at::Tensor&, at::Tensor&,
at::Tensor&)) &
SparseToDense_updateGradInput,
"");
m.def("SubmanifoldConvolution_updateOutput",
(double (*)(at::Tensor, at::Tensor, Metadata<Dimension> &, at::Tensor,
at::Tensor, at::Tensor, at::Tensor)) &
(double (*)(at::Tensor&, at::Tensor&, Metadata<Dimension> &, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&)) &
SubmanifoldConvolution_updateOutput,
"");
m.def("SubmanifoldConvolution_backward",
(void (*)(at::Tensor, at::Tensor, Metadata<Dimension> &, at::Tensor,
at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor)) &
(void (*)(at::Tensor&, at::Tensor&, Metadata<Dimension> &, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&)) &
SubmanifoldConvolution_backward,
"");
m.def("PermutohedralSubmanifoldConvolution_updateOutput",
(double (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
at::Tensor, at::Tensor)) &
(double (*)(at::Tensor&, Metadata<Dimension> &, at::Tensor&, at::Tensor&,
at::Tensor&, at::Tensor&)) &
PermutohedralSubmanifoldConvolution_updateOutput,
"");
m.def("PermutohedralSubmanifoldConvolution_backward",
(void (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
at::Tensor, at::Tensor, at::Tensor, at::Tensor)) &
(void (*)(at::Tensor&, Metadata<Dimension> &, at::Tensor&, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&)) &
PermutohedralSubmanifoldConvolution_backward,
"");
m.def("InputLayer_updateOutput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, long, long)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, long, long)) &
InputLayer_updateOutput,
"");
m.def("InputLayer_updateGradInput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&)) &
InputLayer_updateGradInput,
"");
m.def("OutputLayer_updateOutput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&)) &
OutputLayer_updateOutput,
"");
m.def("OutputLayer_updateGradInput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&)) &
OutputLayer_updateGradInput,
"");
m.def("BLInputLayer_updateOutput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, long)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, long)) &
BLInputLayer_updateOutput,
"");
m.def("BLInputLayer_updateGradInput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&)) &
BLInputLayer_updateGradInput,
"");
m.def("BLOutputLayer_updateOutput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&)) &
BLOutputLayer_updateOutput,
"");
m.def("BLOutputLayer_updateGradInput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&)) &
BLOutputLayer_updateGradInput,
"");
m.def("UnPooling_updateOutput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, long)) &
UnPooling_updateOutput,
"");
m.def("UnPooling_updateGradInput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, long)) &
UnPooling_updateGradInput,
"");
}
......
......@@ -6,234 +6,234 @@
#include "Metadata/Metadata.h"
double AffineReluTrivialConvolution_updateOutput(at::Tensor input_features,
at::Tensor output_features,
at::Tensor affineWeight,
at::Tensor affineBias,
at::Tensor convWeight);
double AffineReluTrivialConvolution_updateOutput(at::Tensor &input_features,
at::Tensor &output_features,
at::Tensor &affineWeight,
at::Tensor &affineBias,
at::Tensor &convWeight);
void AffineReluTrivialConvolution_backward(
at::Tensor input_features, at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor affineWeight,
at::Tensor d_affineWeight, at::Tensor affineBias, at::Tensor d_affineBias,
at::Tensor convWeight, at::Tensor d_convWeight, bool additiveGrad);
at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &affineWeight,
at::Tensor &d_affineWeight, at::Tensor &affineBias, at::Tensor &d_affineBias,
at::Tensor &convWeight, at::Tensor &d_convWeight, bool additiveGrad);
void BatchNormalization_updateOutput(
at::Tensor input_features, at::Tensor output_features, at::Tensor saveMean,
at::Tensor saveInvStd, at::Tensor runningMean, at::Tensor runningVar,
at::Tensor weight, at::Tensor bias, double eps, double momentum, bool train,
at::Tensor &input_features, at::Tensor &output_features, at::Tensor &saveMean,
at::Tensor &saveInvStd, at::Tensor &runningMean, at::Tensor &runningVar,
at::Tensor &weight, at::Tensor &bias, double eps, double momentum, bool train,
double leakiness);
void BatchNormalization_backward(
at::Tensor input_features, at::Tensor d_input_features,
at::Tensor output_features, at::Tensor d_output_features,
at::Tensor saveMean, at::Tensor saveInvStd, at::Tensor runningMean,
at::Tensor runningVar, at::Tensor weight, at::Tensor bias,
at::Tensor d_weight, at::Tensor d_bias, double leakiness);
at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &output_features, at::Tensor &d_output_features,
at::Tensor &saveMean, at::Tensor &saveInvStd, at::Tensor &runningMean,
at::Tensor &runningVar, at::Tensor &weight, at::Tensor &bias,
at::Tensor &d_weight, at::Tensor &d_bias, double leakiness);
void BatchwiseMultiplicativeDropout_updateOutput(at::Tensor input_features,
at::Tensor output_features,
at::Tensor noise,
void BatchwiseMultiplicativeDropout_updateOutput(at::Tensor &input_features,
at::Tensor &output_features,
at::Tensor &noise,
double alpha);
void BatchwiseMultiplicativeDropout_updateGradInput(
at::Tensor input_features, at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor noise, double alpha);
at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &noise, double alpha);
void LeakyReLU_updateOutput(at::Tensor input_features,
at::Tensor output_features, double alpha);
void LeakyReLU_updateOutput(at::Tensor &input_features,
at::Tensor &output_features, double alpha);
void LeakyReLU_updateGradInput(at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, double alpha);
void LeakyReLU_updateGradInput(at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, double alpha);
double NetworkInNetwork_updateOutput(at::Tensor input_features,
at::Tensor output_features,
at::Tensor weight, at::Tensor bias);
double NetworkInNetwork_updateOutput(at::Tensor &input_features,
at::Tensor &output_features,
at::Tensor &weight, at::Tensor &bias);
void NetworkInNetwork_updateGradInput(at::Tensor d_input_features,
at::Tensor d_output_features,
at::Tensor weight);
void NetworkInNetwork_updateGradInput(at::Tensor &d_input_features,
at::Tensor &d_output_features,
at::Tensor &weight);
void NetworkInNetwork_accGradParameters(at::Tensor input_features,
at::Tensor d_output_features,
at::Tensor d_weight, at::Tensor d_bias);
void NetworkInNetwork_accGradParameters(at::Tensor &input_features,
at::Tensor &d_output_features,
at::Tensor &d_weight, at::Tensor &d_bias);
template <Int Dimension>
void ActivePooling_updateOutput(at::Tensor inputSize, Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features, bool average);
void ActivePooling_updateOutput(at::Tensor &inputSize, Metadata<Dimension> &m,
at::Tensor &input_features,
at::Tensor &output_features, bool average);
template <Int Dimension>
void ActivePooling_updateGradInput(at::Tensor inputSize, Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, bool average);
void ActivePooling_updateGradInput(at::Tensor &inputSize, Metadata<Dimension> &m,
at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, bool average);
template <Int Dimension>
void AveragePooling_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
void AveragePooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features,
at::Tensor &input_features,
at::Tensor &output_features,
long nFeaturesToDrop);
template <Int Dimension>
void AveragePooling_updateGradInput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
void AveragePooling_updateGradInput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features,
at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features,
long nFeaturesToDrop);
template <Int Dimension>
double Convolution_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor filterSize, at::Tensor filterStride,
double Convolution_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &filterSize, at::Tensor &filterStride,
Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features, at::Tensor weight,
at::Tensor bias);
template <Int Dimension>
void Convolution_backward(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor filterSize, at::Tensor filterStride,
Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor weight,
at::Tensor d_weight, at::Tensor d_bias);
template <Int Dimension>
double SubmanifoldConvolution_updateOutput(at::Tensor inputSize,
at::Tensor filterSize,
at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight,
at::Tensor &bias);
template <Int Dimension>
void Convolution_backward(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &filterSize, at::Tensor &filterStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight,
at::Tensor &d_weight, at::Tensor &d_bias);
template <Int Dimension>
double SubmanifoldConvolution_updateOutput(at::Tensor &inputSize,
at::Tensor &filterSize,
Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features,
at::Tensor weight, at::Tensor bias);
at::Tensor &input_features,
at::Tensor &output_features,
at::Tensor &weight, at::Tensor &bias);
template <Int Dimension>
void SubmanifoldConvolution_backward(
at::Tensor inputSize, at::Tensor filterSize, Metadata<Dimension> &m,
at::Tensor input_features, at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor weight, at::Tensor d_weight,
at::Tensor d_bias);
at::Tensor &inputSize, at::Tensor &filterSize, Metadata<Dimension> &m,
at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_weight,
at::Tensor &d_bias);
template <Int Dimension>
double PermutohedralSubmanifoldConvolution_updateOutput(
at::Tensor inputSize, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias);
at::Tensor &inputSize, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias);
template <Int Dimension>
void PermutohedralSubmanifoldConvolution_backward(
at::Tensor inputSize, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor d_output_features,
at::Tensor weight, at::Tensor d_weight, at::Tensor d_bias);
at::Tensor &inputSize, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &d_output_features,
at::Tensor &weight, at::Tensor &d_weight, at::Tensor &d_bias);
template <Int Dimension>
double FullConvolution_updateOutput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
at::Tensor filterStride, Metadata<Dimension> &mIn,
Metadata<Dimension> &mOut, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias);
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,
at::Tensor &filterStride, Metadata<Dimension> &mIn,
Metadata<Dimension> &mOut, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias);
template <Int Dimension>
void FullConvolution_backward(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor filterSize, at::Tensor filterStride,
void FullConvolution_backward(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &filterSize, at::Tensor &filterStride,
Metadata<Dimension> &mIn,
Metadata<Dimension> &mOut,
at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor weight,
at::Tensor d_weight, at::Tensor d_bias);
at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight,
at::Tensor &d_weight, at::Tensor &d_bias);
template <Int Dimension>
double RandomizedStrideConvolution_updateOutput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
at::Tensor filterStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias);
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,
at::Tensor &filterStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias);
template <Int Dimension>
void RandomizedStrideConvolution_backward(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
at::Tensor filterStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor d_output_features,
at::Tensor weight, at::Tensor d_weight, at::Tensor d_bias);
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,
at::Tensor &filterStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &d_output_features,
at::Tensor &weight, at::Tensor &d_weight, at::Tensor &d_bias);
template <Int Dimension>
double Deconvolution_updateOutput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
at::Tensor filterStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias);
template <Int Dimension>
void Deconvolution_backward(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor filterSize, at::Tensor filterStride,
Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor weight,
at::Tensor d_weight, at::Tensor d_bias);
template <Int Dimension>
void InputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor spatialSize,
at::Tensor input_coords, at::Tensor input_features,
at::Tensor output_features, long batchSize,
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,
at::Tensor &filterStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias);
template <Int Dimension>
void Deconvolution_backward(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &filterSize, at::Tensor &filterStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight,
at::Tensor &d_weight, at::Tensor &d_bias);
template <Int Dimension>
void InputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor &spatialSize,
at::Tensor &input_coords, at::Tensor &input_features,
at::Tensor &output_features, long batchSize,
long mode);
template <Int Dimension>
void InputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features);
at::Tensor &d_input_features,
at::Tensor &d_output_features);
template <Int Dimension>
void OutputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features);
void OutputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features);
template <Int Dimension>
void OutputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features);
at::Tensor &d_input_features,
at::Tensor &d_output_features);
template <Int Dimension>
void BLInputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor spatialSize,
at::Tensor input_coords,
at::Tensor input_features,
at::Tensor output_features, long mode);
void BLInputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor &spatialSize,
at::Tensor &input_coords,
at::Tensor &input_features,
at::Tensor &output_features, long mode);
template <Int Dimension>
void BLInputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features);
at::Tensor &d_input_features,
at::Tensor &d_output_features);
template <Int Dimension>
void BLOutputLayer_updateOutput(Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features);
at::Tensor &input_features,
at::Tensor &output_features);
template <Int Dimension>
void BLOutputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features);
at::Tensor &d_input_features,
at::Tensor &d_output_features);
template <Int Dimension>
void MaxPooling_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, long nFeaturesToDrop);
void MaxPooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, long nFeaturesToDrop);
template <Int Dimension>
void MaxPooling_updateGradInput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor poolSize,
at::Tensor poolStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor output_features,
at::Tensor d_output_features, long nFeaturesToDrop);
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &output_features,
at::Tensor &d_output_features, long nFeaturesToDrop);
template <Int Dimension>
void RandomizedStrideMaxPooling_updateOutput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor poolSize,
at::Tensor poolStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, long nFeaturesToDrop);
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, long nFeaturesToDrop);
template <Int Dimension>
void RandomizedStrideMaxPooling_updateGradInput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor poolSize,
at::Tensor poolStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor output_features,
at::Tensor d_output_features, long nFeaturesToDrop);
template <Int Dimension>
void SparseToDense_updateOutput(at::Tensor inputSize, Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features, long nPlanes);
template <Int Dimension>
void SparseToDense_updateGradInput(at::Tensor inputSize, Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features);
template <Int Dimension>
void UnPooling_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, long nFeaturesToDrop);
template <Int Dimension>
void UnPooling_updateGradInput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &output_features,
at::Tensor &d_output_features, long nFeaturesToDrop);
template <Int Dimension>
void SparseToDense_updateOutput(at::Tensor &inputSize, Metadata<Dimension> &m,
at::Tensor &input_features,
at::Tensor &output_features, long nPlanes);
template <Int Dimension>
void SparseToDense_updateGradInput(at::Tensor &inputSize, Metadata<Dimension> &m,
at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features);
template <Int Dimension>
void UnPooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, long nFeaturesToDrop);
template <Int Dimension>
void UnPooling_updateGradInput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features,
long nFeaturesToDrop);
void CopyFeaturesHelper_updateOutput(at::Tensor rules, at::Tensor context,
at::Tensor Context);
void CopyFeaturesHelper_updateGradInput(at::Tensor rules, at::Tensor dcontext,
at::Tensor dContext);
void CopyFeaturesHelper_updateOutput(at::Tensor &rules, at::Tensor &context,
at::Tensor &Context);
void CopyFeaturesHelper_updateGradInput(at::Tensor &rules, at::Tensor &dcontext,
at::Tensor &dContext);
......@@ -33,20 +33,20 @@ template class Metadata<6>;
#include "CPU/SparseToDense.cpp"
#include "CPU/UnPooling.cpp"
double AffineReluTrivialConvolution_updateOutput(at::Tensor input_features,
at::Tensor output_features,
at::Tensor affineWeight,
at::Tensor affineBias,
at::Tensor convWeight) {
double AffineReluTrivialConvolution_updateOutput(at::Tensor &input_features,
at::Tensor &output_features,
at::Tensor &affineWeight,
at::Tensor &affineBias,
at::Tensor &convWeight) {
return cpu_AffineReluTrivialConvolution_updateOutput<float>(
input_features, output_features, affineWeight, affineBias, convWeight);
}
void AffineReluTrivialConvolution_backward(
at::Tensor input_features, at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor affineWeight,
at::Tensor d_affineWeight, at::Tensor affineBias, at::Tensor d_affineBias,
at::Tensor convWeight, at::Tensor d_convWeight, bool additiveGrad) {
at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &affineWeight,
at::Tensor &d_affineWeight, at::Tensor &affineBias, at::Tensor &d_affineBias,
at::Tensor &convWeight, at::Tensor &d_convWeight, bool additiveGrad) {
cpu_AffineReluTrivialConvolution_backward<float>(
input_features, d_input_features, d_output_features, affineWeight,
d_affineWeight, affineBias, d_affineBias, convWeight, d_convWeight,
......@@ -54,9 +54,9 @@ void AffineReluTrivialConvolution_backward(
}
void BatchNormalization_updateOutput(
at::Tensor input_features, at::Tensor output_features, at::Tensor saveMean,
at::Tensor saveInvStd, at::Tensor runningMean, at::Tensor runningVar,
at::Tensor weight, at::Tensor bias, double eps, double momentum, bool train,
at::Tensor &input_features, at::Tensor &output_features, at::Tensor &saveMean,
at::Tensor &saveInvStd, at::Tensor &runningMean, at::Tensor &runningVar,
at::Tensor &weight, at::Tensor &bias, double eps, double momentum, bool train,
double leakiness) {
cpu_BatchNormalization_updateOutput<float>(
input_features, output_features, saveMean, saveInvStd, runningMean,
......@@ -64,182 +64,182 @@ void BatchNormalization_updateOutput(
}
void BatchNormalization_backward(
at::Tensor input_features, at::Tensor d_input_features,
at::Tensor output_features, at::Tensor d_output_features,
at::Tensor saveMean, at::Tensor saveInvStd, at::Tensor runningMean,
at::Tensor runningVar, at::Tensor weight, at::Tensor bias,
at::Tensor d_weight, at::Tensor d_bias, double leakiness) {
at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &output_features, at::Tensor &d_output_features,
at::Tensor &saveMean, at::Tensor &saveInvStd, at::Tensor &runningMean,
at::Tensor &runningVar, at::Tensor &weight, at::Tensor &bias,
at::Tensor &d_weight, at::Tensor &d_bias, double leakiness) {
cpu_BatchNormalization_backward<float>(
input_features, d_input_features, output_features, d_output_features,
saveMean, saveInvStd, runningMean, runningVar, weight, bias, d_weight,
d_bias, leakiness);
}
void BatchwiseMultiplicativeDropout_updateOutput(at::Tensor input_features,
at::Tensor output_features,
at::Tensor noise,
void BatchwiseMultiplicativeDropout_updateOutput(at::Tensor &input_features,
at::Tensor &output_features,
at::Tensor &noise,
double alpha) {
cpu_BatchwiseMultiplicativeDropout_updateOutput<float>(
input_features, output_features, noise, alpha);
}
void BatchwiseMultiplicativeDropout_updateGradInput(
at::Tensor input_features, at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor noise, double alpha) {
at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &noise, double alpha) {
cpu_BatchwiseMultiplicativeDropout_updateGradInput<float>(
input_features, d_input_features, d_output_features, noise, alpha);
}
void LeakyReLU_updateOutput(at::Tensor input_features,
at::Tensor output_features, double alpha) {
void LeakyReLU_updateOutput(at::Tensor &input_features,
at::Tensor &output_features, double alpha) {
cpu_LeakyReLU_updateOutput<float>(input_features, output_features, alpha);
}
void LeakyReLU_updateGradInput(at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, double alpha) {
void LeakyReLU_updateGradInput(at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, double alpha) {
cpu_LeakyReLU_updateGradInput<float>(input_features, d_input_features,
d_output_features, alpha);
}
double NetworkInNetwork_updateOutput(at::Tensor input_features,
at::Tensor output_features,
at::Tensor weight, at::Tensor bias) {
double NetworkInNetwork_updateOutput(at::Tensor &input_features,
at::Tensor &output_features,
at::Tensor &weight, at::Tensor &bias) {
return cpu_NetworkInNetwork_updateOutput<float>(
input_features, output_features, weight, bias);
}
void NetworkInNetwork_updateGradInput(at::Tensor d_input_features,
at::Tensor d_output_features,
at::Tensor weight) {
void NetworkInNetwork_updateGradInput(at::Tensor &d_input_features,
at::Tensor &d_output_features,
at::Tensor &weight) {
cpu_NetworkInNetwork_updateGradInput<float>(d_input_features,
d_output_features, weight);
}
void NetworkInNetwork_accGradParameters(at::Tensor input_features,
at::Tensor d_output_features,
at::Tensor d_weight,
at::Tensor d_bias) {
void NetworkInNetwork_accGradParameters(at::Tensor &input_features,
at::Tensor &d_output_features,
at::Tensor &d_weight,
at::Tensor &d_bias) {
cpu_NetworkInNetwork_accGradParameters<float>(
input_features, d_output_features, d_weight, d_bias);
}
template <Int Dimension>
void ActivePooling_updateOutput(at::Tensor inputSize, Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features, bool average) {
void ActivePooling_updateOutput(at::Tensor &inputSize, Metadata<Dimension> &m,
at::Tensor &input_features,
at::Tensor &output_features, bool average) {
cpu_ActivePooling_updateOutput<float, Dimension>(inputSize, m, input_features,
output_features, average);
}
template <Int Dimension>
void ActivePooling_updateGradInput(at::Tensor inputSize, Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, bool average) {
void ActivePooling_updateGradInput(at::Tensor &inputSize, Metadata<Dimension> &m,
at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, bool average) {
return cpu_ActivePooling_updateGradInput<float, Dimension>(
inputSize, m, input_features, d_input_features, d_output_features,
average);
}
template <Int Dimension>
void AveragePooling_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
void AveragePooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features,
at::Tensor &input_features,
at::Tensor &output_features,
long nFeaturesToDrop) {
cpu_AveragePooling_updateOutput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features,
output_features, nFeaturesToDrop);
}
template <Int Dimension>
void AveragePooling_updateGradInput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
void AveragePooling_updateGradInput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features,
at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features,
long nFeaturesToDrop) {
cpu_AveragePooling_updateGradInput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features,
d_input_features, d_output_features, nFeaturesToDrop);
}
template <Int Dimension>
double Convolution_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor filterSize, at::Tensor filterStride,
double Convolution_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &filterSize, at::Tensor &filterStride,
Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features, at::Tensor weight,
at::Tensor bias) {
at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight,
at::Tensor &bias) {
return cpu_Convolution_updateOutput<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features,
output_features, weight, bias);
}
template <Int Dimension>
void Convolution_backward(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor filterSize, at::Tensor filterStride,
Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor weight,
at::Tensor d_weight, at::Tensor d_bias) {
void Convolution_backward(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &filterSize, at::Tensor &filterStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight,
at::Tensor &d_weight, at::Tensor &d_bias) {
cpu_Convolution_backward<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features,
d_input_features, d_output_features, weight, d_weight, d_bias);
}
template <Int Dimension>
double SubmanifoldConvolution_updateOutput(at::Tensor inputSize,
at::Tensor filterSize,
double SubmanifoldConvolution_updateOutput(at::Tensor &inputSize,
at::Tensor &filterSize,
Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features,
at::Tensor weight, at::Tensor bias) {
at::Tensor &input_features,
at::Tensor &output_features,
at::Tensor &weight, at::Tensor &bias) {
return cpu_SubmanifoldConvolution_updateOutput<float, Dimension>(
inputSize, filterSize, m, input_features, output_features, weight, bias);
}
template <Int Dimension>
void SubmanifoldConvolution_backward(
at::Tensor inputSize, at::Tensor filterSize, Metadata<Dimension> &m,
at::Tensor input_features, at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor weight, at::Tensor d_weight,
at::Tensor d_bias) {
at::Tensor &inputSize, at::Tensor &filterSize, Metadata<Dimension> &m,
at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_weight,
at::Tensor &d_bias) {
cpu_SubmanifoldConvolution_backward<float, Dimension>(
inputSize, filterSize, m, input_features, d_input_features,
d_output_features, weight, d_weight, d_bias);
}
template <Int Dimension>
double PermutohedralSubmanifoldConvolution_updateOutput(
at::Tensor inputSize, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias) {
at::Tensor &inputSize, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias) {
return cpu_PermutohedralSubmanifoldConvolution_updateOutput<float, Dimension>(
inputSize, m, input_features, output_features, weight, bias);
}
template <Int Dimension>
void PermutohedralSubmanifoldConvolution_backward(
at::Tensor inputSize, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor d_output_features,
at::Tensor weight, at::Tensor d_weight, at::Tensor d_bias) {
at::Tensor &inputSize, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &d_output_features,
at::Tensor &weight, at::Tensor &d_weight, at::Tensor &d_bias) {
cpu_PermutohedralSubmanifoldConvolution_backward<float, Dimension>(
inputSize, m, input_features, d_input_features, d_output_features, weight,
d_weight, d_bias);
}
template <Int Dimension>
double FullConvolution_updateOutput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
at::Tensor filterStride, Metadata<Dimension> &mIn,
Metadata<Dimension> &mOut, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias) {
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,
at::Tensor &filterStride, Metadata<Dimension> &mIn,
Metadata<Dimension> &mOut, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias) {
return cpu_FullConvolution_updateOutput<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, mIn, mOut,
input_features, output_features, weight, bias);
}
template <Int Dimension>
void FullConvolution_backward(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor filterSize, at::Tensor filterStride,
void FullConvolution_backward(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &filterSize, at::Tensor &filterStride,
Metadata<Dimension> &mIn,
Metadata<Dimension> &mOut,
at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor weight,
at::Tensor d_weight, at::Tensor d_bias) {
at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight,
at::Tensor &d_weight, at::Tensor &d_bias) {
cpu_FullConvolution_backward<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, mIn, mOut,
input_features, d_input_features, d_output_features, weight, d_weight,
......@@ -247,47 +247,47 @@ void FullConvolution_backward(at::Tensor inputSize, at::Tensor outputSize,
}
template <Int Dimension>
double RandomizedStrideConvolution_updateOutput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
at::Tensor filterStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias) {
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,
at::Tensor &filterStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias) {
return cpu_RandomizedStrideConvolution_updateOutput<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features,
output_features, weight, bias);
}
template <Int Dimension>
void RandomizedStrideConvolution_backward(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
at::Tensor filterStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor d_output_features,
at::Tensor weight, at::Tensor d_weight, at::Tensor d_bias) {
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,
at::Tensor &filterStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &d_output_features,
at::Tensor &weight, at::Tensor &d_weight, at::Tensor &d_bias) {
cpu_RandomizedStrideConvolution_backward<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features,
d_input_features, d_output_features, weight, d_weight, d_bias);
}
template <Int Dimension>
double Deconvolution_updateOutput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
at::Tensor filterStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias) {
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,
at::Tensor &filterStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias) {
return cpu_Deconvolution_updateOutput<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features,
output_features, weight, bias);
}
template <Int Dimension>
void Deconvolution_backward(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor filterSize, at::Tensor filterStride,
Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor weight,
at::Tensor d_weight, at::Tensor d_bias) {
void Deconvolution_backward(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &filterSize, at::Tensor &filterStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight,
at::Tensor &d_weight, at::Tensor &d_bias) {
cpu_Deconvolution_backward<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features,
d_input_features, d_output_features, weight, d_weight, d_bias);
}
template <Int Dimension>
void InputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor spatialSize,
at::Tensor input_coords, at::Tensor input_features,
at::Tensor output_features, long batchSize,
void InputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor &spatialSize,
at::Tensor &input_coords, at::Tensor &input_features,
at::Tensor &output_features, long batchSize,
long mode) {
cpu_InputLayer_updateOutput<float, Dimension>(m, spatialSize, input_coords,
input_features, output_features,
......@@ -295,121 +295,121 @@ void InputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor spatialSize,
}
template <Int Dimension>
void InputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features) {
at::Tensor &d_input_features,
at::Tensor &d_output_features) {
cpu_InputLayer_updateGradInput<float, Dimension>(m, d_input_features,
d_output_features);
}
template <Int Dimension>
void OutputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features) {
void OutputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features) {
cpu_OutputLayer_updateOutput<float, Dimension>(m, input_features,
output_features);
}
template <Int Dimension>
void OutputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features) {
at::Tensor &d_input_features,
at::Tensor &d_output_features) {
cpu_OutputLayer_updateGradInput<float, Dimension>(m, d_input_features,
d_output_features);
}
template <Int Dimension>
void BLInputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor spatialSize,
at::Tensor input_coords,
at::Tensor input_features,
at::Tensor output_features, long mode) {
void BLInputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor &spatialSize,
at::Tensor &input_coords,
at::Tensor &input_features,
at::Tensor &output_features, long mode) {
cpu_BLInputLayer_updateOutput<float, Dimension>(
m, spatialSize, input_coords, input_features, output_features, mode);
}
template <Int Dimension>
void BLInputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features) {
at::Tensor &d_input_features,
at::Tensor &d_output_features) {
cpu_BLInputLayer_updateGradInput<float, Dimension>(m, d_input_features,
d_output_features);
}
template <Int Dimension>
void BLOutputLayer_updateOutput(Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features) {
at::Tensor &input_features,
at::Tensor &output_features) {
cpu_BLOutputLayer_updateOutput<float, Dimension>(m, input_features,
output_features);
}
template <Int Dimension>
void BLOutputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features) {
at::Tensor &d_input_features,
at::Tensor &d_output_features) {
cpu_BLOutputLayer_updateGradInput<float, Dimension>(m, d_input_features,
d_output_features);
}
template <Int Dimension>
void MaxPooling_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, long nFeaturesToDrop) {
void MaxPooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, long nFeaturesToDrop) {
cpu_MaxPooling_updateOutput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features,
output_features, nFeaturesToDrop);
}
template <Int Dimension>
void MaxPooling_updateGradInput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor poolSize,
at::Tensor poolStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor output_features,
at::Tensor d_output_features, long nFeaturesToDrop) {
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &output_features,
at::Tensor &d_output_features, long nFeaturesToDrop) {
cpu_MaxPooling_updateGradInput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features,
d_input_features, output_features, d_output_features, nFeaturesToDrop);
}
template <Int Dimension>
void RandomizedStrideMaxPooling_updateOutput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor poolSize,
at::Tensor poolStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, long nFeaturesToDrop) {
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, long nFeaturesToDrop) {
cpu_RandomizedStrideMaxPooling_updateOutput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features,
output_features, nFeaturesToDrop);
}
template <Int Dimension>
void RandomizedStrideMaxPooling_updateGradInput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor poolSize,
at::Tensor poolStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor output_features,
at::Tensor d_output_features, long nFeaturesToDrop) {
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &output_features,
at::Tensor &d_output_features, long nFeaturesToDrop) {
cpu_RandomizedStrideMaxPooling_updateGradInput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features,
d_input_features, output_features, d_output_features, nFeaturesToDrop);
}
template <Int Dimension>
void SparseToDense_updateOutput(at::Tensor inputSize, Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features, long nPlanes) {
void SparseToDense_updateOutput(at::Tensor &inputSize, Metadata<Dimension> &m,
at::Tensor &input_features,
at::Tensor &output_features, long nPlanes) {
cpu_SparseToDense_updateOutput<float, Dimension>(inputSize, m, input_features,
output_features, nPlanes);
}
template <Int Dimension>
void SparseToDense_updateGradInput(at::Tensor inputSize, Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features) {
void SparseToDense_updateGradInput(at::Tensor &inputSize, Metadata<Dimension> &m,
at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features) {
cpu_SparseToDense_updateGradInput<float, Dimension>(
inputSize, m, input_features, d_input_features, d_output_features);
}
template <Int Dimension>
void UnPooling_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, long nFeaturesToDrop) {
void UnPooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, long nFeaturesToDrop) {
cpu_UnPooling_updateOutput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features,
output_features, nFeaturesToDrop);
}
template <Int Dimension>
void UnPooling_updateGradInput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
void UnPooling_updateGradInput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features,
long nFeaturesToDrop) {
cpu_UnPooling_updateGradInput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, d_input_features,
......@@ -418,148 +418,148 @@ void UnPooling_updateGradInput(at::Tensor inputSize, at::Tensor outputSize,
// Explicitly instantiates every Dimension-templated CPU entry point above for
// the spatial dimension currently bound to the DIMENSION macro. Expanded once
// per supported dimension (see the `#define DIMENSION n / FOO;` section below).
#define FOO                                                                    \
  template void ActivePooling_updateOutput<DIMENSION>(                         \
      at::Tensor &inputSize, Metadata<DIMENSION> & m,                          \
      at::Tensor &input_features, at::Tensor &output_features, bool average);  \
  template void ActivePooling_updateGradInput<DIMENSION>(                      \
      at::Tensor &inputSize, Metadata<DIMENSION> & m,                          \
      at::Tensor &input_features, at::Tensor &d_input_features,                \
      at::Tensor &d_output_features, bool average);                            \
  template void AveragePooling_updateOutput<DIMENSION>(                        \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,     \
      at::Tensor &poolStride, Metadata<DIMENSION> & m,                         \
      at::Tensor &input_features, at::Tensor &output_features,                 \
      long nFeaturesToDrop);                                                   \
  template void AveragePooling_updateGradInput<DIMENSION>(                     \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,     \
      at::Tensor &poolStride, Metadata<DIMENSION> & m,                         \
      at::Tensor &input_features, at::Tensor &d_input_features,                \
      at::Tensor &d_output_features, long nFeaturesToDrop);                    \
  template double Convolution_updateOutput<DIMENSION>(                         \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,   \
      at::Tensor &filterStride, Metadata<DIMENSION> & m,                       \
      at::Tensor &input_features, at::Tensor &output_features,                 \
      at::Tensor &weight, at::Tensor &bias);                                   \
  template void Convolution_backward<DIMENSION>(                               \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,   \
      at::Tensor &filterStride, Metadata<DIMENSION> & m,                       \
      at::Tensor &input_features, at::Tensor &d_input_features,                \
      at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_weight, \
      at::Tensor &d_bias);                                                     \
  template double SubmanifoldConvolution_updateOutput<DIMENSION>(              \
      at::Tensor &inputSize, at::Tensor &filterSize, Metadata<DIMENSION> & m,  \
      at::Tensor &input_features, at::Tensor &output_features,                 \
      at::Tensor &weight, at::Tensor &bias);                                   \
  template void SubmanifoldConvolution_backward<DIMENSION>(                    \
      at::Tensor &inputSize, at::Tensor &filterSize, Metadata<DIMENSION> & m,  \
      at::Tensor &input_features, at::Tensor &d_input_features,                \
      at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_weight, \
      at::Tensor &d_bias);                                                     \
  template double PermutohedralSubmanifoldConvolution_updateOutput<DIMENSION>( \
      at::Tensor &inputSize, Metadata<DIMENSION> & m,                          \
      at::Tensor &input_features, at::Tensor &output_features,                 \
      at::Tensor &weight, at::Tensor &bias);                                   \
  template void PermutohedralSubmanifoldConvolution_backward<DIMENSION>(       \
      at::Tensor &inputSize, Metadata<DIMENSION> & m,                          \
      at::Tensor &input_features, at::Tensor &d_input_features,                \
      at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_weight, \
      at::Tensor &d_bias);                                                     \
  template double FullConvolution_updateOutput<DIMENSION>(                     \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,   \
      at::Tensor &filterStride, Metadata<DIMENSION> & mIn,                     \
      Metadata<DIMENSION> & mOut, at::Tensor &input_features,                  \
      at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias);      \
  template void FullConvolution_backward<DIMENSION>(                           \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,   \
      at::Tensor &filterStride, Metadata<DIMENSION> & mIn,                     \
      Metadata<DIMENSION> & mOut, at::Tensor &input_features,                  \
      at::Tensor &d_input_features, at::Tensor &d_output_features,             \
      at::Tensor &weight, at::Tensor &d_weight, at::Tensor &d_bias);           \
  template double RandomizedStrideConvolution_updateOutput<DIMENSION>(         \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,   \
      at::Tensor &filterStride, Metadata<DIMENSION> & m,                       \
      at::Tensor &input_features, at::Tensor &output_features,                 \
      at::Tensor &weight, at::Tensor &bias);                                   \
  template void RandomizedStrideConvolution_backward<DIMENSION>(               \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,   \
      at::Tensor &filterStride, Metadata<DIMENSION> & m,                       \
      at::Tensor &input_features, at::Tensor &d_input_features,                \
      at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_weight, \
      at::Tensor &d_bias);                                                     \
  template double Deconvolution_updateOutput<DIMENSION>(                       \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,   \
      at::Tensor &filterStride, Metadata<DIMENSION> & m,                       \
      at::Tensor &input_features, at::Tensor &output_features,                 \
      at::Tensor &weight, at::Tensor &bias);                                   \
  template void Deconvolution_backward<DIMENSION>(                             \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,   \
      at::Tensor &filterStride, Metadata<DIMENSION> & m,                       \
      at::Tensor &input_features, at::Tensor &d_input_features,                \
      at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_weight, \
      at::Tensor &d_bias);                                                     \
  template void InputLayer_updateOutput<DIMENSION>(                            \
      Metadata<DIMENSION> & m, at::Tensor &spatialSize,                        \
      at::Tensor &input_coords, at::Tensor &input_features,                    \
      at::Tensor &output_features, long batchSize, long mode);                 \
  template void InputLayer_updateGradInput<DIMENSION>(                         \
      Metadata<DIMENSION> & m, at::Tensor &d_input_features,                   \
      at::Tensor &d_output_features);                                          \
  template void OutputLayer_updateOutput<DIMENSION>(                           \
      Metadata<DIMENSION> & m, at::Tensor &input_features,                     \
      at::Tensor &output_features);                                            \
  template void OutputLayer_updateGradInput<DIMENSION>(                        \
      Metadata<DIMENSION> & m, at::Tensor &d_input_features,                   \
      at::Tensor &d_output_features);                                          \
  template void BLInputLayer_updateOutput<DIMENSION>(                          \
      Metadata<DIMENSION> & m, at::Tensor &spatialSize,                        \
      at::Tensor &input_coords, at::Tensor &input_features,                    \
      at::Tensor &output_features, long mode);                                 \
  template void BLInputLayer_updateGradInput<DIMENSION>(                       \
      Metadata<DIMENSION> & m, at::Tensor &d_input_features,                   \
      at::Tensor &d_output_features);                                          \
  template void BLOutputLayer_updateOutput<DIMENSION>(                         \
      Metadata<DIMENSION> & m, at::Tensor &input_features,                     \
      at::Tensor &output_features);                                            \
  template void BLOutputLayer_updateGradInput<DIMENSION>(                      \
      Metadata<DIMENSION> & m, at::Tensor &d_input_features,                   \
      at::Tensor &d_output_features);                                          \
  template void MaxPooling_updateOutput<DIMENSION>(                            \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,     \
      at::Tensor &poolStride, Metadata<DIMENSION> & m,                         \
      at::Tensor &input_features, at::Tensor &output_features,                 \
      long nFeaturesToDrop);                                                   \
  template void MaxPooling_updateGradInput<DIMENSION>(                         \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,     \
      at::Tensor &poolStride, Metadata<DIMENSION> & m,                         \
      at::Tensor &input_features, at::Tensor &d_input_features,                \
      at::Tensor &output_features, at::Tensor &d_output_features,              \
      long nFeaturesToDrop);                                                   \
  template void RandomizedStrideMaxPooling_updateOutput<DIMENSION>(            \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,     \
      at::Tensor &poolStride, Metadata<DIMENSION> & m,                         \
      at::Tensor &input_features, at::Tensor &output_features,                 \
      long nFeaturesToDrop);                                                   \
  template void RandomizedStrideMaxPooling_updateGradInput<DIMENSION>(         \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,     \
      at::Tensor &poolStride, Metadata<DIMENSION> & m,                         \
      at::Tensor &input_features, at::Tensor &d_input_features,                \
      at::Tensor &output_features, at::Tensor &d_output_features,              \
      long nFeaturesToDrop);                                                   \
  template void SparseToDense_updateOutput<DIMENSION>(                         \
      at::Tensor &inputSize, Metadata<DIMENSION> & m,                          \
      at::Tensor &input_features, at::Tensor &output_features, long nPlanes);  \
  template void SparseToDense_updateGradInput<DIMENSION>(                      \
      at::Tensor &inputSize, Metadata<DIMENSION> & m,                          \
      at::Tensor &input_features, at::Tensor &d_input_features,                \
      at::Tensor &d_output_features);                                          \
  template void UnPooling_updateOutput<DIMENSION>(                             \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,     \
      at::Tensor &poolStride, Metadata<DIMENSION> & m,                         \
      at::Tensor &input_features, at::Tensor &output_features,                 \
      long nFeaturesToDrop);                                                   \
  template void UnPooling_updateGradInput<DIMENSION>(                          \
      at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,     \
      at::Tensor &poolStride, Metadata<DIMENSION> & m,                         \
      at::Tensor &d_input_features, at::Tensor &d_output_features,             \
      long nFeaturesToDrop);
#define DIMENSION 1
......@@ -581,11 +581,11 @@ FOO;
FOO;
#undef DIMENSION
void CopyFeaturesHelper_updateOutput(at::Tensor rules, at::Tensor context,
at::Tensor Context) {
void CopyFeaturesHelper_updateOutput(at::Tensor &rules, at::Tensor &context,
at::Tensor &Context) {
cpu_CopyFeaturesHelper_updateOutput<float>(rules, context, Context);
}
void CopyFeaturesHelper_updateGradInput(at::Tensor rules, at::Tensor dcontext,
at::Tensor dContext) {
void CopyFeaturesHelper_updateGradInput(at::Tensor &rules, at::Tensor &dcontext,
at::Tensor &dContext) {
cpu_CopyFeaturesHelper_updateGradInput<float>(rules, dcontext, dContext);
}
......@@ -48,11 +48,11 @@ template class Metadata<6>;
#include "CUDA/SparseToDense.cpp"
#include "CUDA/UnPooling.cpp"
double AffineReluTrivialConvolution_updateOutput(at::Tensor input_features,
at::Tensor output_features,
at::Tensor affineWeight,
at::Tensor affineBias,
at::Tensor convWeight) {
double AffineReluTrivialConvolution_updateOutput(at::Tensor &input_features,
at::Tensor &output_features,
at::Tensor &affineWeight,
at::Tensor &affineBias,
at::Tensor &convWeight) {
if (input_features.type().is_cuda())
return cuda_AffineReluTrivialConvolution_updateOutput<float>(
input_features, output_features, affineWeight, affineBias, convWeight);
......@@ -62,10 +62,11 @@ double AffineReluTrivialConvolution_updateOutput(at::Tensor input_features,
}
void AffineReluTrivialConvolution_backward(
at::Tensor input_features, at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor affineWeight,
at::Tensor d_affineWeight, at::Tensor affineBias, at::Tensor d_affineBias,
at::Tensor convWeight, at::Tensor d_convWeight, bool additiveGrad) {
at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &affineWeight,
at::Tensor &d_affineWeight, at::Tensor &affineBias,
at::Tensor &d_affineBias, at::Tensor &convWeight, at::Tensor &d_convWeight,
bool additiveGrad) {
if (d_output_features.type().is_cuda())
cuda_AffineReluTrivialConvolution_backward<float>(
input_features, d_input_features, d_output_features, affineWeight,
......@@ -79,10 +80,10 @@ void AffineReluTrivialConvolution_backward(
}
void BatchNormalization_updateOutput(
at::Tensor input_features, at::Tensor output_features, at::Tensor saveMean,
at::Tensor saveInvStd, at::Tensor runningMean, at::Tensor runningVar,
at::Tensor weight, at::Tensor bias, double eps, double momentum, bool train,
double leakiness) {
at::Tensor &input_features, at::Tensor &output_features,
at::Tensor &saveMean, at::Tensor &saveInvStd, at::Tensor &runningMean,
at::Tensor &runningVar, at::Tensor &weight, at::Tensor &bias, double eps,
double momentum, bool train, double leakiness) {
if (input_features.type().is_cuda())
cuda_BatchNormalization_updateOutput<float>(
input_features, output_features, saveMean, saveInvStd, runningMean,
......@@ -94,11 +95,11 @@ void BatchNormalization_updateOutput(
}
void BatchNormalization_backward(
at::Tensor input_features, at::Tensor d_input_features,
at::Tensor output_features, at::Tensor d_output_features,
at::Tensor saveMean, at::Tensor saveInvStd, at::Tensor runningMean,
at::Tensor runningVar, at::Tensor weight, at::Tensor bias,
at::Tensor d_weight, at::Tensor d_bias, double leakiness) {
at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &output_features, at::Tensor &d_output_features,
at::Tensor &saveMean, at::Tensor &saveInvStd, at::Tensor &runningMean,
at::Tensor &runningVar, at::Tensor &weight, at::Tensor &bias,
at::Tensor &d_weight, at::Tensor &d_bias, double leakiness) {
if (d_output_features.type().is_cuda())
cuda_BatchNormalization_backward<float>(
input_features, d_input_features, output_features, d_output_features,
......@@ -111,9 +112,9 @@ void BatchNormalization_backward(
d_bias, leakiness);
}
void BatchwiseMultiplicativeDropout_updateOutput(at::Tensor input_features,
at::Tensor output_features,
at::Tensor noise,
void BatchwiseMultiplicativeDropout_updateOutput(at::Tensor &input_features,
at::Tensor &output_features,
at::Tensor &noise,
double alpha) {
if (input_features.type().is_cuda())
cuda_BatchwiseMultiplicativeDropout_updateOutput<float>(
......@@ -124,8 +125,8 @@ void BatchwiseMultiplicativeDropout_updateOutput(at::Tensor input_features,
}
void BatchwiseMultiplicativeDropout_updateGradInput(
at::Tensor input_features, at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor noise, double alpha) {
at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &noise, double alpha) {
if (d_output_features.type().is_cuda())
cuda_BatchwiseMultiplicativeDropout_updateGradInput<float>(
input_features, d_input_features, d_output_features, noise, alpha);
......@@ -134,17 +135,17 @@ void BatchwiseMultiplicativeDropout_updateGradInput(
input_features, d_input_features, d_output_features, noise, alpha);
}
void LeakyReLU_updateOutput(at::Tensor input_features,
at::Tensor output_features, double alpha) {
void LeakyReLU_updateOutput(at::Tensor &input_features,
at::Tensor &output_features, double alpha) {
if (input_features.type().is_cuda())
cuda_LeakyReLU_updateOutput<float>(input_features, output_features, alpha);
else
cpu_LeakyReLU_updateOutput<float>(input_features, output_features, alpha);
}
void LeakyReLU_updateGradInput(at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, double alpha) {
void LeakyReLU_updateGradInput(at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, double alpha) {
if (d_output_features.type().is_cuda())
cuda_LeakyReLU_updateGradInput<float>(input_features, d_input_features,
d_output_features, alpha);
......@@ -153,9 +154,9 @@ void LeakyReLU_updateGradInput(at::Tensor input_features,
d_output_features, alpha);
}
double NetworkInNetwork_updateOutput(at::Tensor input_features,
at::Tensor output_features,
at::Tensor weight, at::Tensor bias) {
double NetworkInNetwork_updateOutput(at::Tensor &input_features,
at::Tensor &output_features,
at::Tensor &weight, at::Tensor &bias) {
if (input_features.type().is_cuda())
return cuda_NetworkInNetwork_updateOutput<float>(
input_features, output_features, weight, bias);
......@@ -164,9 +165,9 @@ double NetworkInNetwork_updateOutput(at::Tensor input_features,
input_features, output_features, weight, bias);
}
void NetworkInNetwork_updateGradInput(at::Tensor d_input_features,
at::Tensor d_output_features,
at::Tensor weight) {
void NetworkInNetwork_updateGradInput(at::Tensor &d_input_features,
at::Tensor &d_output_features,
at::Tensor &weight) {
if (d_output_features.type().is_cuda())
cuda_NetworkInNetwork_updateGradInput<float>(d_input_features,
d_output_features, weight);
......@@ -175,10 +176,10 @@ void NetworkInNetwork_updateGradInput(at::Tensor d_input_features,
d_output_features, weight);
}
void NetworkInNetwork_accGradParameters(at::Tensor input_features,
at::Tensor d_output_features,
at::Tensor d_weight,
at::Tensor d_bias) {
void NetworkInNetwork_accGradParameters(at::Tensor &input_features,
at::Tensor &d_output_features,
at::Tensor &d_weight,
at::Tensor &d_bias) {
if (d_output_features.type().is_cuda())
cuda_NetworkInNetwork_accGradParameters<float>(
input_features, d_output_features, d_weight, d_bias);
......@@ -187,9 +188,9 @@ void NetworkInNetwork_accGradParameters(at::Tensor input_features,
input_features, d_output_features, d_weight, d_bias);
}
template <Int Dimension>
void ActivePooling_updateOutput(at::Tensor inputSize, Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features, bool average) {
void ActivePooling_updateOutput(at::Tensor &inputSize, Metadata<Dimension> &m,
at::Tensor &input_features,
at::Tensor &output_features, bool average) {
if (input_features.type().is_cuda())
cuda_ActivePooling_updateOutput<float, Dimension>(
inputSize, m, input_features, output_features, average);
......@@ -199,10 +200,9 @@ void ActivePooling_updateOutput(at::Tensor inputSize, Metadata<Dimension> &m,
}
template <Int Dimension>
void ActivePooling_updateGradInput(at::Tensor inputSize, Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, bool average) {
void ActivePooling_updateGradInput(
at::Tensor &inputSize, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &d_output_features, bool average) {
if (d_output_features.type().is_cuda())
return cuda_ActivePooling_updateGradInput<float, Dimension>(
inputSize, m, input_features, d_input_features, d_output_features,
......@@ -213,11 +213,11 @@ void ActivePooling_updateGradInput(at::Tensor inputSize, Metadata<Dimension> &m,
average);
}
template <Int Dimension>
void AveragePooling_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
void AveragePooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features,
at::Tensor &input_features,
at::Tensor &output_features,
long nFeaturesToDrop) {
if (input_features.type().is_cuda())
cuda_AveragePooling_updateOutput<float, Dimension>(
......@@ -229,13 +229,11 @@ void AveragePooling_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
output_features, nFeaturesToDrop);
}
template <Int Dimension>
void AveragePooling_updateGradInput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features,
long nFeaturesToDrop) {
void AveragePooling_updateGradInput(
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &d_output_features,
long nFeaturesToDrop) {
if (d_output_features.type().is_cuda())
cuda_AveragePooling_updateGradInput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features,
......@@ -246,12 +244,12 @@ void AveragePooling_updateGradInput(at::Tensor inputSize, at::Tensor outputSize,
d_input_features, d_output_features, nFeaturesToDrop);
}
template <Int Dimension>
double Convolution_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor filterSize, at::Tensor filterStride,
Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features, at::Tensor weight,
at::Tensor bias) {
double
Convolution_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &filterSize, at::Tensor &filterStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight,
at::Tensor &bias) {
if (input_features.type().is_cuda())
return cuda_Convolution_updateOutput<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features,
......@@ -262,12 +260,12 @@ double Convolution_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
output_features, weight, bias);
}
template <Int Dimension>
void Convolution_backward(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor filterSize, at::Tensor filterStride,
Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor weight,
at::Tensor d_weight, at::Tensor d_bias) {
void Convolution_backward(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &filterSize, at::Tensor &filterStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight,
at::Tensor &d_weight, at::Tensor &d_bias) {
if (d_output_features.type().is_cuda())
cuda_Convolution_backward<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features,
......@@ -278,12 +276,10 @@ void Convolution_backward(at::Tensor inputSize, at::Tensor outputSize,
d_input_features, d_output_features, weight, d_weight, d_bias);
}
template <Int Dimension>
double SubmanifoldConvolution_updateOutput(at::Tensor inputSize,
at::Tensor filterSize,
Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features,
at::Tensor weight, at::Tensor bias) {
double SubmanifoldConvolution_updateOutput(
at::Tensor &inputSize, at::Tensor &filterSize, Metadata<Dimension> &m,
at::Tensor &input_features, at::Tensor &output_features, at::Tensor &weight,
at::Tensor &bias) {
if (input_features.type().is_cuda())
return cuda_SubmanifoldConvolution_updateOutput<float, Dimension>(
inputSize, filterSize, m, input_features, output_features, weight,
......@@ -295,10 +291,10 @@ double SubmanifoldConvolution_updateOutput(at::Tensor inputSize,
}
template <Int Dimension>
void SubmanifoldConvolution_backward(
at::Tensor inputSize, at::Tensor filterSize, Metadata<Dimension> &m,
at::Tensor input_features, at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor weight, at::Tensor d_weight,
at::Tensor d_bias) {
at::Tensor &inputSize, at::Tensor &filterSize, Metadata<Dimension> &m,
at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_weight,
at::Tensor &d_bias) {
if (d_output_features.type().is_cuda())
cuda_SubmanifoldConvolution_backward<float, Dimension>(
inputSize, filterSize, m, input_features, d_input_features,
......@@ -310,8 +306,8 @@ void SubmanifoldConvolution_backward(
}
template <Int Dimension>
double PermutohedralSubmanifoldConvolution_updateOutput(
at::Tensor inputSize, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias) {
at::Tensor &inputSize, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias) {
if (input_features.type().is_cuda())
return cuda_PermutohedralSubmanifoldConvolution_updateOutput<float,
Dimension>(
......@@ -323,9 +319,9 @@ double PermutohedralSubmanifoldConvolution_updateOutput(
}
template <Int Dimension>
void PermutohedralSubmanifoldConvolution_backward(
at::Tensor inputSize, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor d_output_features,
at::Tensor weight, at::Tensor d_weight, at::Tensor d_bias) {
at::Tensor &inputSize, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &d_output_features,
at::Tensor &weight, at::Tensor &d_weight, at::Tensor &d_bias) {
if (d_output_features.type().is_cuda())
cuda_PermutohedralSubmanifoldConvolution_backward<float, Dimension>(
inputSize, m, input_features, d_input_features, d_output_features,
......@@ -337,10 +333,10 @@ void PermutohedralSubmanifoldConvolution_backward(
}
template <Int Dimension>
double FullConvolution_updateOutput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
at::Tensor filterStride, Metadata<Dimension> &mIn,
Metadata<Dimension> &mOut, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias) {
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,
at::Tensor &filterStride, Metadata<Dimension> &mIn,
Metadata<Dimension> &mOut, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight, at::Tensor &bias) {
if (input_features.type().is_cuda())
return cuda_FullConvolution_updateOutput<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, mIn, mOut,
......@@ -351,14 +347,14 @@ double FullConvolution_updateOutput(
input_features, output_features, weight, bias);
}
template <Int Dimension>
void FullConvolution_backward(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor filterSize, at::Tensor filterStride,
void FullConvolution_backward(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &filterSize, at::Tensor &filterStride,
Metadata<Dimension> &mIn,
Metadata<Dimension> &mOut,
at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor weight,
at::Tensor d_weight, at::Tensor d_bias) {
at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight,
at::Tensor &d_weight, at::Tensor &d_bias) {
if (d_output_features.type().is_cuda())
cuda_FullConvolution_backward<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, mIn, mOut,
......@@ -372,9 +368,10 @@ void FullConvolution_backward(at::Tensor inputSize, at::Tensor outputSize,
}
template <Int Dimension>
double RandomizedStrideConvolution_updateOutput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
at::Tensor filterStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias) {
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,
at::Tensor &filterStride, Metadata<Dimension> &m,
at::Tensor &input_features, at::Tensor &output_features, at::Tensor &weight,
at::Tensor &bias) {
if (input_features.type().is_cuda())
return cuda_RandomizedStrideConvolution_updateOutput<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features,
......@@ -386,10 +383,11 @@ double RandomizedStrideConvolution_updateOutput(
}
template <Int Dimension>
void RandomizedStrideConvolution_backward(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
at::Tensor filterStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor d_output_features,
at::Tensor weight, at::Tensor d_weight, at::Tensor d_bias) {
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &filterSize,
at::Tensor &filterStride, Metadata<Dimension> &m,
at::Tensor &input_features, at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight, at::Tensor &d_weight,
at::Tensor &d_bias) {
if (d_output_features.type().is_cuda())
cuda_RandomizedStrideConvolution_backward<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features,
......@@ -400,10 +398,12 @@ void RandomizedStrideConvolution_backward(
d_input_features, d_output_features, weight, d_weight, d_bias);
}
template <Int Dimension>
double Deconvolution_updateOutput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor filterSize,
at::Tensor filterStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, at::Tensor weight, at::Tensor bias) {
double
Deconvolution_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &filterSize, at::Tensor &filterStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, at::Tensor &weight,
at::Tensor &bias) {
if (input_features.type().is_cuda())
return cuda_Deconvolution_updateOutput<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features,
......@@ -414,12 +414,12 @@ double Deconvolution_updateOutput(
output_features, weight, bias);
}
template <Int Dimension>
void Deconvolution_backward(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor filterSize, at::Tensor filterStride,
Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features, at::Tensor weight,
at::Tensor d_weight, at::Tensor d_bias) {
void Deconvolution_backward(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &filterSize, at::Tensor &filterStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features, at::Tensor &weight,
at::Tensor &d_weight, at::Tensor &d_bias) {
if (d_output_features.type().is_cuda())
cuda_Deconvolution_backward<float, Dimension>(
inputSize, outputSize, filterSize, filterStride, m, input_features,
......@@ -430,9 +430,10 @@ void Deconvolution_backward(at::Tensor inputSize, at::Tensor outputSize,
d_input_features, d_output_features, weight, d_weight, d_bias);
}
template <Int Dimension>
void InputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor spatialSize,
at::Tensor input_coords, at::Tensor input_features,
at::Tensor output_features, long batchSize,
void InputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor &spatialSize,
at::Tensor &input_coords,
at::Tensor &input_features,
at::Tensor &output_features, long batchSize,
long mode) {
if (input_features.type().is_cuda())
cuda_InputLayer_updateOutput<float, Dimension>(
......@@ -445,8 +446,8 @@ void InputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor spatialSize,
}
template <Int Dimension>
void InputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features) {
at::Tensor &d_input_features,
at::Tensor &d_output_features) {
if (d_output_features.type().is_cuda())
cuda_InputLayer_updateGradInput<float, Dimension>(m, d_input_features,
d_output_features);
......@@ -455,8 +456,9 @@ void InputLayer_updateGradInput(Metadata<Dimension> &m,
d_output_features);
}
template <Int Dimension>
void OutputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features) {
void OutputLayer_updateOutput(Metadata<Dimension> &m,
at::Tensor &input_features,
at::Tensor &output_features) {
if (input_features.type().is_cuda())
cuda_OutputLayer_updateOutput<float, Dimension>(m, input_features,
output_features);
......@@ -466,8 +468,8 @@ void OutputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor input_features,
}
template <Int Dimension>
void OutputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features) {
at::Tensor &d_input_features,
at::Tensor &d_output_features) {
if (d_output_features.type().is_cuda())
cuda_OutputLayer_updateGradInput<float, Dimension>(m, d_input_features,
d_output_features);
......@@ -476,10 +478,10 @@ void OutputLayer_updateGradInput(Metadata<Dimension> &m,
d_output_features);
}
template <Int Dimension>
void BLInputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor spatialSize,
at::Tensor input_coords,
at::Tensor input_features,
at::Tensor output_features, long mode) {
void BLInputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor &spatialSize,
at::Tensor &input_coords,
at::Tensor &input_features,
at::Tensor &output_features, long mode) {
if (input_features.type().is_cuda())
cuda_BLInputLayer_updateOutput<float, Dimension>(
m, spatialSize, input_coords, input_features, output_features, mode);
......@@ -489,8 +491,8 @@ void BLInputLayer_updateOutput(Metadata<Dimension> &m, at::Tensor spatialSize,
}
template <Int Dimension>
void BLInputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features) {
at::Tensor &d_input_features,
at::Tensor &d_output_features) {
if (d_output_features.type().is_cuda())
cuda_BLInputLayer_updateGradInput<float, Dimension>(m, d_input_features,
d_output_features);
......@@ -500,8 +502,8 @@ void BLInputLayer_updateGradInput(Metadata<Dimension> &m,
}
template <Int Dimension>
void BLOutputLayer_updateOutput(Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features) {
at::Tensor &input_features,
at::Tensor &output_features) {
if (input_features.type().is_cuda())
cuda_BLOutputLayer_updateOutput<float, Dimension>(m, input_features,
output_features);
......@@ -511,8 +513,8 @@ void BLOutputLayer_updateOutput(Metadata<Dimension> &m,
}
template <Int Dimension>
void BLOutputLayer_updateGradInput(Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features) {
at::Tensor &d_input_features,
at::Tensor &d_output_features) {
if (d_output_features.type().is_cuda())
cuda_BLOutputLayer_updateGradInput<float, Dimension>(m, d_input_features,
d_output_features);
......@@ -521,10 +523,11 @@ void BLOutputLayer_updateGradInput(Metadata<Dimension> &m,
d_output_features);
}
template <Int Dimension>
void MaxPooling_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, long nFeaturesToDrop) {
void MaxPooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features,
long nFeaturesToDrop) {
if (input_features.type().is_cuda())
cuda_MaxPooling_updateOutput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features,
......@@ -536,10 +539,10 @@ void MaxPooling_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
}
template <Int Dimension>
void MaxPooling_updateGradInput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor poolSize,
at::Tensor poolStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor output_features,
at::Tensor d_output_features, long nFeaturesToDrop) {
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &output_features,
at::Tensor &d_output_features, long nFeaturesToDrop) {
if (d_output_features.type().is_cuda())
cuda_MaxPooling_updateGradInput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features,
......@@ -551,9 +554,9 @@ void MaxPooling_updateGradInput(
}
template <Int Dimension>
void RandomizedStrideMaxPooling_updateOutput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor poolSize,
at::Tensor poolStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, long nFeaturesToDrop) {
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, long nFeaturesToDrop) {
if (input_features.type().is_cuda())
cuda_RandomizedStrideMaxPooling_updateOutput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features,
......@@ -565,10 +568,10 @@ void RandomizedStrideMaxPooling_updateOutput(
}
template <Int Dimension>
void RandomizedStrideMaxPooling_updateGradInput(
at::Tensor inputSize, at::Tensor outputSize, at::Tensor poolSize,
at::Tensor poolStride, Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor d_input_features, at::Tensor output_features,
at::Tensor d_output_features, long nFeaturesToDrop) {
at::Tensor &inputSize, at::Tensor &outputSize, at::Tensor &poolSize,
at::Tensor &poolStride, Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &d_input_features, at::Tensor &output_features,
at::Tensor &d_output_features, long nFeaturesToDrop) {
if (d_output_features.type().is_cuda())
cuda_RandomizedStrideMaxPooling_updateGradInput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features,
......@@ -579,9 +582,9 @@ void RandomizedStrideMaxPooling_updateGradInput(
d_input_features, output_features, d_output_features, nFeaturesToDrop);
}
template <Int Dimension>
void SparseToDense_updateOutput(at::Tensor inputSize, Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor output_features, long nPlanes) {
void SparseToDense_updateOutput(at::Tensor &inputSize, Metadata<Dimension> &m,
at::Tensor &input_features,
at::Tensor &output_features, long nPlanes) {
if (input_features.type().is_cuda())
cuda_SparseToDense_updateOutput<float, Dimension>(
inputSize, m, input_features, output_features, nPlanes);
......@@ -590,10 +593,11 @@ void SparseToDense_updateOutput(at::Tensor inputSize, Metadata<Dimension> &m,
inputSize, m, input_features, output_features, nPlanes);
}
template <Int Dimension>
void SparseToDense_updateGradInput(at::Tensor inputSize, Metadata<Dimension> &m,
at::Tensor input_features,
at::Tensor d_input_features,
at::Tensor d_output_features) {
void SparseToDense_updateGradInput(at::Tensor &inputSize,
Metadata<Dimension> &m,
at::Tensor &input_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features) {
if (d_output_features.type().is_cuda())
cuda_SparseToDense_updateGradInput<float, Dimension>(
inputSize, m, input_features, d_input_features, d_output_features);
......@@ -602,10 +606,10 @@ void SparseToDense_updateGradInput(at::Tensor inputSize, Metadata<Dimension> &m,
inputSize, m, input_features, d_input_features, d_output_features);
}
template <Int Dimension>
void UnPooling_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
Metadata<Dimension> &m, at::Tensor input_features,
at::Tensor output_features, long nFeaturesToDrop) {
void UnPooling_updateOutput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m, at::Tensor &input_features,
at::Tensor &output_features, long nFeaturesToDrop) {
if (input_features.type().is_cuda())
cuda_UnPooling_updateOutput<float, Dimension>(
inputSize, outputSize, poolSize, poolStride, m, input_features,
......@@ -616,11 +620,11 @@ void UnPooling_updateOutput(at::Tensor inputSize, at::Tensor outputSize,
output_features, nFeaturesToDrop);
}
template <Int Dimension>
void UnPooling_updateGradInput(at::Tensor inputSize, at::Tensor outputSize,
at::Tensor poolSize, at::Tensor poolStride,
void UnPooling_updateGradInput(at::Tensor &inputSize, at::Tensor &outputSize,
at::Tensor &poolSize, at::Tensor &poolStride,
Metadata<Dimension> &m,
at::Tensor d_input_features,
at::Tensor d_output_features,
at::Tensor &d_input_features,
at::Tensor &d_output_features,
long nFeaturesToDrop) {
if (d_output_features.type().is_cuda())
cuda_UnPooling_updateGradInput<float, Dimension>(
......@@ -634,148 +638,152 @@ void UnPooling_updateGradInput(at::Tensor inputSize, at::Tensor outputSize,
// Explicit template instantiations for one spatial dimension.
// FOO is expanded once per value of DIMENSION (see the #define DIMENSION /
// FOO; pairs below). All at::Tensor parameters are taken by reference
// (see PR "Use references where possible") to avoid refcount churn from
// copying tensor handles at every call.
//
// NOTE(review): the flattened diff view had left both the old by-value and
// the new by-reference parameter lists in place for every instantiation,
// which is not valid C++; only the post-merge by-reference declarations are
// kept here.
#define FOO                                                                    \
  template void ActivePooling_updateOutput<DIMENSION>(                         \
      at::Tensor & inputSize, Metadata<DIMENSION> & m,                         \
      at::Tensor & input_features, at::Tensor & output_features,               \
      bool average);                                                           \
  template void ActivePooling_updateGradInput<DIMENSION>(                      \
      at::Tensor & inputSize, Metadata<DIMENSION> & m,                         \
      at::Tensor & input_features, at::Tensor & d_input_features,              \
      at::Tensor & d_output_features, bool average);                           \
  template void AveragePooling_updateOutput<DIMENSION>(                        \
      at::Tensor & inputSize, at::Tensor & outputSize, at::Tensor & poolSize,  \
      at::Tensor & poolStride, Metadata<DIMENSION> & m,                        \
      at::Tensor & input_features, at::Tensor & output_features,               \
      long nFeaturesToDrop);                                                   \
  template void AveragePooling_updateGradInput<DIMENSION>(                     \
      at::Tensor & inputSize, at::Tensor & outputSize, at::Tensor & poolSize,  \
      at::Tensor & poolStride, Metadata<DIMENSION> & m,                        \
      at::Tensor & input_features, at::Tensor & d_input_features,              \
      at::Tensor & d_output_features, long nFeaturesToDrop);                   \
  template double Convolution_updateOutput<DIMENSION>(                         \
      at::Tensor & inputSize, at::Tensor & outputSize,                         \
      at::Tensor & filterSize, at::Tensor & filterStride,                      \
      Metadata<DIMENSION> & m, at::Tensor & input_features,                    \
      at::Tensor & output_features, at::Tensor & weight, at::Tensor & bias);   \
  template void Convolution_backward<DIMENSION>(                               \
      at::Tensor & inputSize, at::Tensor & outputSize,                         \
      at::Tensor & filterSize, at::Tensor & filterStride,                      \
      Metadata<DIMENSION> & m, at::Tensor & input_features,                    \
      at::Tensor & d_input_features, at::Tensor & d_output_features,           \
      at::Tensor & weight, at::Tensor & d_weight, at::Tensor & d_bias);        \
  template double SubmanifoldConvolution_updateOutput<DIMENSION>(              \
      at::Tensor & inputSize, at::Tensor & filterSize,                         \
      Metadata<DIMENSION> & m, at::Tensor & input_features,                    \
      at::Tensor & output_features, at::Tensor & weight, at::Tensor & bias);   \
  template void SubmanifoldConvolution_backward<DIMENSION>(                    \
      at::Tensor & inputSize, at::Tensor & filterSize,                         \
      Metadata<DIMENSION> & m, at::Tensor & input_features,                    \
      at::Tensor & d_input_features, at::Tensor & d_output_features,           \
      at::Tensor & weight, at::Tensor & d_weight, at::Tensor & d_bias);        \
  template double PermutohedralSubmanifoldConvolution_updateOutput<DIMENSION>( \
      at::Tensor & inputSize, Metadata<DIMENSION> & m,                         \
      at::Tensor & input_features, at::Tensor & output_features,               \
      at::Tensor & weight, at::Tensor & bias);                                 \
  template void PermutohedralSubmanifoldConvolution_backward<DIMENSION>(       \
      at::Tensor & inputSize, Metadata<DIMENSION> & m,                         \
      at::Tensor & input_features, at::Tensor & d_input_features,              \
      at::Tensor & d_output_features, at::Tensor & weight,                     \
      at::Tensor & d_weight, at::Tensor & d_bias);                             \
  template double FullConvolution_updateOutput<DIMENSION>(                     \
      at::Tensor & inputSize, at::Tensor & outputSize,                         \
      at::Tensor & filterSize, at::Tensor & filterStride,                      \
      Metadata<DIMENSION> & mIn, Metadata<DIMENSION> & mOut,                   \
      at::Tensor & input_features, at::Tensor & output_features,               \
      at::Tensor & weight, at::Tensor & bias);                                 \
  template void FullConvolution_backward<DIMENSION>(                           \
      at::Tensor & inputSize, at::Tensor & outputSize,                         \
      at::Tensor & filterSize, at::Tensor & filterStride,                      \
      Metadata<DIMENSION> & mIn, Metadata<DIMENSION> & mOut,                   \
      at::Tensor & input_features, at::Tensor & d_input_features,              \
      at::Tensor & d_output_features, at::Tensor & weight,                     \
      at::Tensor & d_weight, at::Tensor & d_bias);                             \
  template double RandomizedStrideConvolution_updateOutput<DIMENSION>(         \
      at::Tensor & inputSize, at::Tensor & outputSize,                         \
      at::Tensor & filterSize, at::Tensor & filterStride,                      \
      Metadata<DIMENSION> & m, at::Tensor & input_features,                    \
      at::Tensor & output_features, at::Tensor & weight, at::Tensor & bias);   \
  template void RandomizedStrideConvolution_backward<DIMENSION>(               \
      at::Tensor & inputSize, at::Tensor & outputSize,                         \
      at::Tensor & filterSize, at::Tensor & filterStride,                      \
      Metadata<DIMENSION> & m, at::Tensor & input_features,                    \
      at::Tensor & d_input_features, at::Tensor & d_output_features,           \
      at::Tensor & weight, at::Tensor & d_weight, at::Tensor & d_bias);        \
  template double Deconvolution_updateOutput<DIMENSION>(                       \
      at::Tensor & inputSize, at::Tensor & outputSize,                         \
      at::Tensor & filterSize, at::Tensor & filterStride,                      \
      Metadata<DIMENSION> & m, at::Tensor & input_features,                    \
      at::Tensor & output_features, at::Tensor & weight, at::Tensor & bias);   \
  template void Deconvolution_backward<DIMENSION>(                             \
      at::Tensor & inputSize, at::Tensor & outputSize,                         \
      at::Tensor & filterSize, at::Tensor & filterStride,                      \
      Metadata<DIMENSION> & m, at::Tensor & input_features,                    \
      at::Tensor & d_input_features, at::Tensor & d_output_features,           \
      at::Tensor & weight, at::Tensor & d_weight, at::Tensor & d_bias);        \
  template void InputLayer_updateOutput<DIMENSION>(                            \
      Metadata<DIMENSION> & m, at::Tensor & spatialSize,                       \
      at::Tensor & input_coords, at::Tensor & input_features,                  \
      at::Tensor & output_features, long batchSize, long mode);                \
  template void InputLayer_updateGradInput<DIMENSION>(                         \
      Metadata<DIMENSION> & m, at::Tensor & d_input_features,                  \
      at::Tensor & d_output_features);                                         \
  template void OutputLayer_updateOutput<DIMENSION>(                           \
      Metadata<DIMENSION> & m, at::Tensor & input_features,                    \
      at::Tensor & output_features);                                           \
  template void OutputLayer_updateGradInput<DIMENSION>(                        \
      Metadata<DIMENSION> & m, at::Tensor & d_input_features,                  \
      at::Tensor & d_output_features);                                         \
  template void BLInputLayer_updateOutput<DIMENSION>(                          \
      Metadata<DIMENSION> & m, at::Tensor & spatialSize,                       \
      at::Tensor & input_coords, at::Tensor & input_features,                  \
      at::Tensor & output_features, long mode);                                \
  template void BLInputLayer_updateGradInput<DIMENSION>(                       \
      Metadata<DIMENSION> & m, at::Tensor & d_input_features,                  \
      at::Tensor & d_output_features);                                         \
  template void BLOutputLayer_updateOutput<DIMENSION>(                         \
      Metadata<DIMENSION> & m, at::Tensor & input_features,                    \
      at::Tensor & output_features);                                           \
  template void BLOutputLayer_updateGradInput<DIMENSION>(                      \
      Metadata<DIMENSION> & m, at::Tensor & d_input_features,                  \
      at::Tensor & d_output_features);                                         \
  template void MaxPooling_updateOutput<DIMENSION>(                            \
      at::Tensor & inputSize, at::Tensor & outputSize, at::Tensor & poolSize,  \
      at::Tensor & poolStride, Metadata<DIMENSION> & m,                        \
      at::Tensor & input_features, at::Tensor & output_features,               \
      long nFeaturesToDrop);                                                   \
  template void MaxPooling_updateGradInput<DIMENSION>(                         \
      at::Tensor & inputSize, at::Tensor & outputSize, at::Tensor & poolSize,  \
      at::Tensor & poolStride, Metadata<DIMENSION> & m,                        \
      at::Tensor & input_features, at::Tensor & d_input_features,              \
      at::Tensor & output_features, at::Tensor & d_output_features,            \
      long nFeaturesToDrop);                                                   \
  template void RandomizedStrideMaxPooling_updateOutput<DIMENSION>(            \
      at::Tensor & inputSize, at::Tensor & outputSize, at::Tensor & poolSize,  \
      at::Tensor & poolStride, Metadata<DIMENSION> & m,                        \
      at::Tensor & input_features, at::Tensor & output_features,               \
      long nFeaturesToDrop);                                                   \
  template void RandomizedStrideMaxPooling_updateGradInput<DIMENSION>(         \
      at::Tensor & inputSize, at::Tensor & outputSize, at::Tensor & poolSize,  \
      at::Tensor & poolStride, Metadata<DIMENSION> & m,                        \
      at::Tensor & input_features, at::Tensor & d_input_features,              \
      at::Tensor & output_features, at::Tensor & d_output_features,            \
      long nFeaturesToDrop);                                                   \
  template void SparseToDense_updateOutput<DIMENSION>(                         \
      at::Tensor & inputSize, Metadata<DIMENSION> & m,                         \
      at::Tensor & input_features, at::Tensor & output_features,               \
      long nPlanes);                                                           \
  template void SparseToDense_updateGradInput<DIMENSION>(                      \
      at::Tensor & inputSize, Metadata<DIMENSION> & m,                         \
      at::Tensor & input_features, at::Tensor & d_input_features,              \
      at::Tensor & d_output_features);                                         \
  template void UnPooling_updateOutput<DIMENSION>(                             \
      at::Tensor & inputSize, at::Tensor & outputSize, at::Tensor & poolSize,  \
      at::Tensor & poolStride, Metadata<DIMENSION> & m,                        \
      at::Tensor & input_features, at::Tensor & output_features,               \
      long nFeaturesToDrop);                                                   \
  template void UnPooling_updateGradInput<DIMENSION>(                          \
      at::Tensor & inputSize, at::Tensor & outputSize, at::Tensor & poolSize,  \
      at::Tensor & poolStride, Metadata<DIMENSION> & m,                        \
      at::Tensor & d_input_features, at::Tensor & d_output_features,           \
      long nFeaturesToDrop);
#define DIMENSION 1
......@@ -797,15 +805,15 @@ FOO;
FOO;
#undef DIMENSION
void CopyFeaturesHelper_updateOutput(at::Tensor rules, at::Tensor context,
at::Tensor Context) {
void CopyFeaturesHelper_updateOutput(at::Tensor &rules, at::Tensor &context,
at::Tensor &Context) {
if (context.is_cuda())
cuda_CopyFeaturesHelper_updateOutput<float>(rules, context, Context);
else
cpu_CopyFeaturesHelper_updateOutput<float>(rules, context, Context);
}
void CopyFeaturesHelper_updateGradInput(at::Tensor rules, at::Tensor dcontext,
at::Tensor dContext) {
void CopyFeaturesHelper_updateGradInput(at::Tensor &rules, at::Tensor &dcontext,
at::Tensor &dContext) {
if (dContext.is_cuda())
cuda_CopyFeaturesHelper_updateGradInput<float>(rules, dcontext, dContext);
else
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment