Unverified commit 2082f213, authored by Ben Graham, committed by GitHub

Merge pull request #118 from facebookresearch/references

Use references where possible
parents 1171aae3 d8c8a060
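
The change in this PR is mechanical: `at::Tensor` value parameters become `at::Tensor &` references (and several rule-book bindings become `const auto &`). As a hedged illustration of why this matters, the sketch below contrasts the two signatures. It is not code from this repository and the function names are made up: `at::Tensor` is a shared, reference-counted handle, so both versions mutate the caller's tensor; passing by reference simply skips the handle copy and refcount traffic on every call.

```cpp
// Illustrative only -- not code from this PR. Shows the by-value and
// by-reference signatures the diff converts between.
#include <ATen/ATen.h>

// Before: the tensor handle is copied into the callee on each call.
void scale_by_value(at::Tensor out, at::Tensor in, float alpha) {
  out.resize_as_(in);          // resize_/copy_ act on the shared TensorImpl,
  out.copy_(in).mul_(alpha);   // so the caller still sees the result
}

// After: the callee works through the caller's handle directly.
void scale_by_reference(at::Tensor &out, at::Tensor &in, float alpha) {
  out.resize_as_(in);
  out.copy_(in).mul_(alpha);
}

int main() {
  at::Tensor in = at::ones({4, 3});
  at::Tensor out1 = at::empty({0});
  at::Tensor out2 = at::empty({0});
  scale_by_value(out1, in, 0.5f);
  scale_by_reference(out2, in, 0.5f);
  return 0;
}
```
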
......@@ -16,10 +16,10 @@ void InputLayer_bp(T *d_input_features, T *d_output_features, Int nRows,
template <typename T, Int Dimension>
void cuda_InputLayer_updateOutput(Metadata<Dimension> &m,
/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor input_coords,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features,
/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &input_coords,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features,
long batchSize, long mode) {
m.inputLayer(spatialSize, input_coords, batchSize, mode);
......@@ -44,8 +44,8 @@ void cuda_InputLayer_updateOutput(Metadata<Dimension> &m,
template <typename T, Int Dimension>
void cuda_InputLayer_updateGradInput(
Metadata<Dimension> &m,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features) {
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features) {
auto &rules = m.inputLayerRuleBook;
Int nPlanes = d_output_features.size(1);
......@@ -69,8 +69,8 @@ void cuda_InputLayer_updateGradInput(
template <typename T, Int Dimension>
void cuda_OutputLayer_updateOutput(Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features) {
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features) {
auto &rules = m.inputLayerRuleBook;
Int nPlanes = input_features.size(1);
......@@ -93,8 +93,8 @@ void cuda_OutputLayer_updateOutput(Metadata<Dimension> &m,
template <typename T, Int Dimension>
void cuda_OutputLayer_updateGradInput(
Metadata<Dimension> &m,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features) {
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features) {
auto &rules = m.inputLayerRuleBook;
Int nPlanes = d_output_features.size(1);
......@@ -118,10 +118,10 @@ void cuda_OutputLayer_updateGradInput(
template <typename T, Int Dimension>
void cuda_BLInputLayer_updateOutput(Metadata<Dimension> &m,
/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor input_coords,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features,
/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &input_coords,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features,
long mode) {
m.blLayer(spatialSize, input_coords, mode);
......@@ -148,8 +148,8 @@ void cuda_BLInputLayer_updateOutput(Metadata<Dimension> &m,
template <typename T, Int Dimension>
void cuda_BLInputLayer_updateGradInput(
Metadata<Dimension> &m,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features) {
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features) {
auto &rules = m.blLayerRuleBook;
Int nPlanes = d_output_features.size(1);
......@@ -176,8 +176,8 @@ void cuda_BLInputLayer_updateGradInput(
template <typename T, Int Dimension>
void cuda_BLOutputLayer_updateOutput(
Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features) {
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features) {
auto &rules = m.blLayerRuleBook;
Int nPlanes = input_features.size(1);
......@@ -201,8 +201,8 @@ void cuda_BLOutputLayer_updateOutput(
template <typename T, Int Dimension>
void cuda_BLOutputLayer_updateGradInput(
Metadata<Dimension> &m,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features) {
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features) {
auto &rules = m.blLayerRuleBook;
Int nPlanes = d_output_features.size(2);
......
......@@ -11,8 +11,8 @@ void LeakyReLU_bp(T *input_features, T *d_input_features, T *output_features,
Int n, T alpha);
template <typename T>
void cuda_LeakyReLU_updateOutput(/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features,
void cuda_LeakyReLU_updateOutput(/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features,
T alpha) {
output_features.resize_as_(input_features);
auto n = input_features.numel();
......@@ -22,9 +22,9 @@ void cuda_LeakyReLU_updateOutput(/*cuda float*/ at::Tensor input_features,
template <typename T>
void cuda_LeakyReLU_updateGradInput(
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features, T alpha) {
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features, T alpha) {
d_input_features.resize_as_(d_output_features);
auto n = d_input_features.numel();
LeakyReLU_bp<T>(input_features.data<T>(), d_input_features.data<T>(),
......
......@@ -16,14 +16,14 @@ void cuda_MaxPooling_BackwardPass(T *input_features, T *d_input_features,
template <typename T, Int Dimension>
void cuda_MaxPooling_updateOutput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
/*long*/ at::Tensor poolSize,
/*long*/ at::Tensor poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features, long nFeaturesToDrop) {
/*long*/ at::Tensor &inputSize, /*long*/ at::Tensor &outputSize,
/*long*/ at::Tensor &poolSize,
/*long*/ at::Tensor &poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features, long nFeaturesToDrop) {
Int nPlanes = input_features.size(1) - nFeaturesToDrop;
auto _rules =
const auto &_rules =
m.getRuleBook(inputSize, outputSize, poolSize, poolStride, true);
Int nActive = m.getNActive(outputSize);
output_features.resize_({nActive, nPlanes});
......@@ -36,16 +36,16 @@ void cuda_MaxPooling_updateOutput(
}
template <typename T, Int Dimension>
void cuda_MaxPooling_updateGradInput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
/*long*/ at::Tensor poolSize,
/*long*/ at::Tensor poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor output_features,
/*cuda float*/ at::Tensor d_output_features, long nFeaturesToDrop) {
/*long*/ at::Tensor &inputSize, /*long*/ at::Tensor &outputSize,
/*long*/ at::Tensor &poolSize,
/*long*/ at::Tensor &poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &output_features,
/*cuda float*/ at::Tensor &d_output_features, long nFeaturesToDrop) {
Int nPlanes = input_features.size(1) - nFeaturesToDrop;
auto _rules =
const auto &_rules =
m.getRuleBook(inputSize, outputSize, poolSize, poolStride, true);
d_input_features.resize_as_(input_features);
d_input_features.zero_();
......@@ -60,14 +60,14 @@ void cuda_MaxPooling_updateGradInput(
}
template <typename T, Int Dimension>
void cuda_RandomizedStrideMaxPooling_updateOutput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
/*long*/ at::Tensor poolSize,
/*long*/ at::Tensor poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features, long nFeaturesToDrop) {
/*long*/ at::Tensor &inputSize, /*long*/ at::Tensor &outputSize,
/*long*/ at::Tensor &poolSize,
/*long*/ at::Tensor &poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features, long nFeaturesToDrop) {
Int nPlanes = input_features.size(1) - nFeaturesToDrop;
auto _rules = m.getRandomizedStrideRuleBook(inputSize, outputSize, poolSize,
const auto &_rules = m.getRandomizedStrideRuleBook(inputSize, outputSize, poolSize,
poolStride, true);
Int nActive = m.getNActive(outputSize);
output_features.resize_({nActive, nPlanes});
......@@ -80,16 +80,16 @@ void cuda_RandomizedStrideMaxPooling_updateOutput(
}
template <typename T, Int Dimension>
void cuda_RandomizedStrideMaxPooling_updateGradInput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
/*long*/ at::Tensor poolSize,
/*long*/ at::Tensor poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor output_features,
/*cuda float*/ at::Tensor d_output_features, long nFeaturesToDrop) {
/*long*/ at::Tensor &inputSize, /*long*/ at::Tensor &outputSize,
/*long*/ at::Tensor &poolSize,
/*long*/ at::Tensor &poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &output_features,
/*cuda float*/ at::Tensor &d_output_features, long nFeaturesToDrop) {
Int nPlanes = input_features.size(1) - nFeaturesToDrop;
auto _rules = m.getRandomizedStrideRuleBook(inputSize, outputSize, poolSize,
const auto &_rules = m.getRandomizedStrideRuleBook(inputSize, outputSize, poolSize,
poolStride, true);
d_input_features.resize_as_(input_features);
d_input_features.zero_();
......
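
Besides the parameter changes, the pooling hunks above rebind the rule book with `const auto &_rules` instead of `auto _rules`. Because `Metadata::getRuleBook` and `getRandomizedStrideRuleBook` return a `RuleBook &` (see the Metadata hunks later in this diff), `auto` deduces a value type and copies the whole rule book, while `const auto &` binds to the stored object. A minimal sketch of the difference follows; the `RuleBook` alias and the `Store` type are stand-ins for illustration, not this repository's definitions.

```cpp
// Sketch only: demonstrates why `const auto &` avoids copying a returned
// reference. The RuleBook alias here is an assumption for illustration.
#include <cstdio>
#include <vector>

using Int = int;
using RuleBook = std::vector<std::vector<Int>>;

struct Store {
  RuleBook book{{0, 1, 2}, {3, 4, 5}};
  RuleBook &getRuleBook() { return book; }  // returns a reference, like Metadata::getRuleBook
};

int main() {
  Store s;
  auto copy = s.getRuleBook();        // deduces RuleBook: deep copy of every rule vector
  const auto &ref = s.getRuleBook();  // binds to the stored object, no copy
  std::printf("copy aliases store? %d\n", copy.data() == s.book.data());  // 0
  std::printf("ref aliases store?  %d\n", ref.data() == s.book.data());   // 1
  return 0;
}
```
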
......@@ -15,9 +15,9 @@ void cuda_SparseToDense_BackwardPass(T *d_input_features, T *d_output_features,
template <typename T, Int Dimension>
void cuda_SparseToDense_updateOutput(
/*long*/ at::Tensor inputSize, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features, long nPlanes) {
/*long*/ at::Tensor &inputSize, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features, long nPlanes) {
{
std::array<long, Dimension + 2> sz;
......@@ -30,7 +30,7 @@ void cuda_SparseToDense_updateOutput(
output_features.zero_();
}
if (input_features.ndimension() == 2) {
auto _rules = m.getSparseToDenseRuleBook(inputSize, true);
const auto &_rules = m.getSparseToDenseRuleBook(inputSize, true);
Int _nPlanes = input_features.size(1);
auto iF = input_features.data<T>();
auto oF = output_features.data<T>();
......@@ -40,16 +40,16 @@ void cuda_SparseToDense_updateOutput(
}
template <typename T, Int Dimension>
void cuda_SparseToDense_updateGradInput(
/*long*/ at::Tensor inputSize, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features) {
/*long*/ at::Tensor &inputSize, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features) {
d_input_features.resize_as_(input_features);
d_input_features.zero_();
if (input_features.ndimension() == 2) {
auto _rules = m.getSparseToDenseRuleBook(inputSize, true);
const auto &_rules = m.getSparseToDenseRuleBook(inputSize, true);
long spatialVolume = inputSize.prod().data<long>()[0];
Int _nPlanes = d_input_features.size(1);
auto diF = d_input_features.data<T>();
......
......@@ -15,14 +15,14 @@ void cuda_UnPooling_BackwardPass(T *d_input_features, T *d_output_features,
template <typename T, Int Dimension>
void cuda_UnPooling_updateOutput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
/*long*/ at::Tensor poolSize,
/*long*/ at::Tensor poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor input_features,
/*cuda float*/ at::Tensor output_features, long nFeaturesToDrop) {
/*long*/ at::Tensor &inputSize, /*long*/ at::Tensor &outputSize,
/*long*/ at::Tensor &poolSize,
/*long*/ at::Tensor &poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &input_features,
/*cuda float*/ at::Tensor &output_features, long nFeaturesToDrop) {
Int nPlanes = input_features.size(1) - nFeaturesToDrop;
auto _rules =
const auto &_rules =
m.getRuleBook(outputSize, inputSize, poolSize, poolStride, true);
Int nActive = m.getNActive(outputSize);
output_features.resize_({nActive, input_features.size(1) - nFeaturesToDrop});
......@@ -37,14 +37,14 @@ void cuda_UnPooling_updateOutput(
template <typename T, Int Dimension>
void cuda_UnPooling_updateGradInput(
/*long*/ at::Tensor inputSize, /*long*/ at::Tensor outputSize,
/*long*/ at::Tensor poolSize,
/*long*/ at::Tensor poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor d_input_features,
/*cuda float*/ at::Tensor d_output_features, long nFeaturesToDrop) {
/*long*/ at::Tensor &inputSize, /*long*/ at::Tensor &outputSize,
/*long*/ at::Tensor &poolSize,
/*long*/ at::Tensor &poolStride, Metadata<Dimension> &m,
/*cuda float*/ at::Tensor &d_input_features,
/*cuda float*/ at::Tensor &d_output_features, long nFeaturesToDrop) {
Int nPlanes = d_input_features.size(1) - nFeaturesToDrop;
auto _rules =
const auto &_rules =
m.getRuleBook(outputSize, inputSize, poolSize, poolStride, true);
auto diF = d_input_features.data<T>() + nFeaturesToDrop;
......
......@@ -15,7 +15,6 @@ void Convolution_InputSgToRulesAndOutputSg(SparseGrid<dimension> &inputGrid,
long *stride, long *inputSpatialSize,
long *outputSpatialSize) {
rules.resize(volume<dimension>(size));
for (auto const &inIter : inputGrid.mp) {
auto outRegion = OutputRegionCalculator<dimension>(
inIter.first, size, stride, outputSpatialSize);
......
......@@ -22,7 +22,7 @@ template <Int dimension> SparseGrid<dimension>::SparseGrid() : ctr(0) {
mp.set_empty_key(empty_key);
}
template <typename T> T *OptionalTensorData(at::Tensor tensor) {
template <typename T> T *OptionalTensorData(at::Tensor &tensor) {
return tensor.numel() ? tensor.data<T>() : nullptr;
}
......@@ -30,9 +30,9 @@ template <Int dimension>
void addPointToSparseGridMapAndFeatures(SparseGridMap<dimension> &mp,
Point<dimension> p, Int &nActive,
long nPlanes,
/*float*/ at::Tensor features,
/*float*/ at::Tensor &features,
float *vec, bool overwrite) {
auto mapVal = mp.insert(std::make_pair(p, nActive));
if (mapVal.second) {
nActive++;
......@@ -65,16 +65,17 @@ template <Int dimension> void Metadata<dimension>::clear() {
blLayerRuleBook.clear();
}
template <Int dimension>
Int Metadata<dimension>::getNActive(/*long*/ at::Tensor spatialSize) {
Int Metadata<dimension>::getNActive(/*long*/ at::Tensor &spatialSize) {
return nActive[LongTensorToPoint<dimension>(spatialSize)];
};
template <Int dimension>
SparseGrids<dimension> &
Metadata<dimension>::getSparseGrid(/*long*/ at::Tensor spatialSize) {
Metadata<dimension>::getSparseGrid(/*long*/ at::Tensor &spatialSize) {
return grids[LongTensorToPoint<dimension>(spatialSize)];
};
template <Int dimension>
void Metadata<dimension>::setInputSpatialSize(/*long*/ at::Tensor spatialSize) {
void Metadata<dimension>::setInputSpatialSize(
/*long*/ at::Tensor &spatialSize) {
inputSpatialSize = LongTensorToPoint<dimension>(spatialSize);
inputSGs = &grids[inputSpatialSize];
inputNActive = &nActive[inputSpatialSize];
......@@ -85,10 +86,10 @@ template <Int dimension> void Metadata<dimension>::batchAddSample() {
inputSG = &inputSGs->back();
}
template <Int dimension>
void Metadata<dimension>::setInputSpatialLocation(/*float*/ at::Tensor features,
/*long*/ at::Tensor location,
/*float*/ at::Tensor vec,
bool overwrite) {
void Metadata<dimension>::setInputSpatialLocation(
/*float*/ at::Tensor &features,
/*long*/ at::Tensor &location,
/*float*/ at::Tensor &vec, bool overwrite) {
auto p = LongTensorToPoint<dimension>(location);
SparseGridMap<dimension> &mp = inputSG->mp;
Int &nActive = *inputNActive;
......@@ -98,9 +99,9 @@ void Metadata<dimension>::setInputSpatialLocation(/*float*/ at::Tensor features,
}
template <Int dimension>
void Metadata<dimension>::setInputSpatialLocations(
/*float*/ at::Tensor features,
/*long*/ at::Tensor locations,
/*float*/ at::Tensor vecs, bool overwrite) {
/*float*/ at::Tensor &features,
/*long*/ at::Tensor &locations,
/*float*/ at::Tensor &vecs, bool overwrite) {
/* assert(locations.ndimension() == 2 and "locations must be 2
* dimensional!"); */
/* assert(vecs.ndimension() == 2 and "vecs must be 2 dimensional!"); */
......@@ -147,7 +148,7 @@ void Metadata<dimension>::setInputSpatialLocations(
template <Int dimension>
at::Tensor
Metadata<dimension>::getSpatialLocations(/*long*/ at::Tensor spatialSize) {
Metadata<dimension>::getSpatialLocations(/*long*/ at::Tensor &spatialSize) {
Int nActive = getNActive(spatialSize);
auto &SGs = getSparseGrid(spatialSize);
Int batchSize = SGs.size();
......@@ -169,8 +170,8 @@ Metadata<dimension>::getSpatialLocations(/*long*/ at::Tensor spatialSize) {
}
template <Int dimension>
void Metadata<dimension>::createMetadataForDenseToSparse(
/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor nz_, long batchSize) {
/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &nz_, long batchSize) {
clear();
setInputSpatialSize(spatialSize);
inputSGs->resize(batchSize);
......@@ -208,9 +209,9 @@ void Metadata<dimension>::createMetadataForDenseToSparse(
template <Int dimension>
void Metadata<dimension>::sparsifyMetadata(Metadata<dimension> &mOut,
/*long*/ at::Tensor spatialSize,
/*byte*/ at::Tensor filter,
/*long*/ at::Tensor cuSum) {
/*long*/ at::Tensor &spatialSize,
/*byte*/ at::Tensor &filter,
/*long*/ at::Tensor &cuSum) {
// Create a new SparseGrids with fewer entries.
mOut.clear();
auto p = LongTensorToPoint<dimension>(spatialSize);
......@@ -240,7 +241,7 @@ void Metadata<dimension>::sparsifyMetadata(Metadata<dimension> &mOut,
template <Int dimension>
void Metadata<dimension>::appendMetadata(Metadata<dimension> &mAdd,
/*long*/ at::Tensor spatialSize) {
/*long*/ at::Tensor &spatialSize) {
auto p = LongTensorToPoint<dimension>(spatialSize);
auto &sgs1 = grids[p];
auto &sgs2 = mAdd.grids[p];
......@@ -257,7 +258,7 @@ void Metadata<dimension>::appendMetadata(Metadata<dimension> &mAdd,
template <Int dimension>
std::vector<at::Tensor>
Metadata<dimension>::sparsifyCompare(Metadata<dimension> &mGT,
/*long*/ at::Tensor spatialSize) {
/*long*/ at::Tensor &spatialSize) {
auto p = LongTensorToPoint<dimension>(spatialSize);
at::Tensor gt = torch::zeros({nActive[p]}, at::kByte);
at::Tensor ref_map = torch::empty({mGT.nActive[p]}, at::kLong);
......@@ -288,10 +289,10 @@ Metadata<dimension>::sparsifyCompare(Metadata<dimension> &mGT,
// size[dimension] == #feature planes
template <Int dimension>
void Metadata<dimension>::addSampleFromThresholdedTensor(
/*float*/ at::Tensor features_,
/*float*/ at::Tensor tensor_,
/*long*/ at::Tensor offset_,
/*long*/ at::Tensor spatialSize_, float threshold) {
/*float*/ at::Tensor &features_,
/*float*/ at::Tensor &tensor_,
/*long*/ at::Tensor &offset_,
/*long*/ at::Tensor &spatialSize_, float threshold) {
auto &nActive = *inputNActive;
auto &SGs = *inputSGs;
......@@ -404,8 +405,8 @@ template <Int dimension> void Metadata<dimension>::generateRuleBooks2s2() {
}
template <Int dimension>
void Metadata<dimension>::inputLayer(/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor coords, Int batchSize,
void Metadata<dimension>::inputLayer(/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &coords, Int batchSize,
Int mode) {
assert(spatialSize.ndimension() == 1);
assert(spatialSize.size(0) == dimension);
......@@ -417,8 +418,8 @@ void Metadata<dimension>::inputLayer(/*long*/ at::Tensor spatialSize,
*inputNActive);
}
template <Int dimension>
void Metadata<dimension>::blLayer(/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor coords, Int mode) {
void Metadata<dimension>::blLayer(/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &coords, Int mode) {
assert(spatialSize.ndimension() == 1);
assert(spatialSize.size(0) == dimension);
assert(coords.ndimension() == 3);
......@@ -429,8 +430,8 @@ void Metadata<dimension>::blLayer(/*long*/ at::Tensor spatialSize,
}
template <Int dimension>
RuleBook &Metadata<dimension>::getSubmanifoldRuleBook(
/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor size, bool openMP) {
/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &size, bool openMP) {
auto p = TwoLongTensorsToPoint<dimension>(spatialSize, size);
auto &rb = submanifoldRuleBooks[p];
if (rb.empty()) {
......@@ -444,7 +445,7 @@ RuleBook &Metadata<dimension>::getSubmanifoldRuleBook(
}
template <Int dimension>
RuleBook &Metadata<dimension>::getPermutohedralSubmanifoldRuleBook(
/*long*/ at::Tensor spatialSize, bool openMP) {
/*long*/ at::Tensor &spatialSize, bool openMP) {
auto p = LongTensorToPoint<dimension>(spatialSize);
auto &rb = permutohedralRuleBooks[p];
if (rb.empty()) {
......@@ -458,7 +459,7 @@ RuleBook &Metadata<dimension>::getPermutohedralSubmanifoldRuleBook(
}
template <Int dimension>
RuleBook &Metadata<dimension>::getActivePoolingRuleBook(
/*long*/ at::Tensor spatialSize) {
/*long*/ at::Tensor &spatialSize) {
auto spatialSz = LongTensorToPoint<dimension>(spatialSize);
auto &SGs = grids[spatialSz];
auto &rb = activePoolingRuleBooks[spatialSz];
......@@ -468,7 +469,7 @@ RuleBook &Metadata<dimension>::getActivePoolingRuleBook(
}
template <Int dimension>
RuleBook &Metadata<dimension>::getSparseToDenseRuleBook(
/*long*/ at::Tensor spatialSize, bool openMP) {
/*long*/ at::Tensor &spatialSize, bool openMP) {
auto ss = LongTensorToPoint<dimension>(spatialSize);
auto &SGs = grids[ss];
auto &rb = sparseToDenseRuleBooks[ss];
......@@ -484,10 +485,10 @@ RuleBook &Metadata<dimension>::getSparseToDenseRuleBook(
}
template <Int dimension>
RuleBook &Metadata<dimension>::getRuleBook(
/*long*/ at::Tensor inputSpatialSize,
/*long*/ at::Tensor outputSpatialSize,
/*long*/ at::Tensor size,
/*long*/ at::Tensor stride, bool openMP) {
/*long*/ at::Tensor &inputSpatialSize,
/*long*/ at::Tensor &outputSpatialSize,
/*long*/ at::Tensor &size,
/*long*/ at::Tensor &stride, bool openMP) {
auto p = ThreeLongTensorsToPoint<dimension>(inputSpatialSize, size, stride);
auto &rb = ruleBooks[p];
if (rb.empty()) {
......@@ -511,10 +512,10 @@ RuleBook &Metadata<dimension>::getRuleBook(
}
template <Int dimension>
RuleBook &Metadata<dimension>::getFullConvolutionRuleBook(
/*long*/ at::Tensor inputSpatialSize,
/*long*/ at::Tensor outputSpatialSize,
/*long*/ at::Tensor size,
/*long*/ at::Tensor stride, Metadata<dimension> &newM) {
/*long*/ at::Tensor &inputSpatialSize,
/*long*/ at::Tensor &outputSpatialSize,
/*long*/ at::Tensor &size,
/*long*/ at::Tensor &stride, Metadata<dimension> &newM) {
auto &rb = newM.fullConvolutionRuleBook;
if (rb.empty()) {
newM.clear();
......@@ -533,10 +534,10 @@ RuleBook &Metadata<dimension>::getFullConvolutionRuleBook(
template <Int dimension>
RuleBook &Metadata<dimension>::getRandomizedStrideRuleBook(
/*long*/ at::Tensor inputSpatialSize,
/*long*/ at::Tensor outputSpatialSize,
/*long*/ at::Tensor size,
/*long*/ at::Tensor stride, bool openMP) {
/*long*/ at::Tensor &inputSpatialSize,
/*long*/ at::Tensor &outputSpatialSize,
/*long*/ at::Tensor &size,
/*long*/ at::Tensor &stride, bool openMP) {
auto p = ThreeLongTensorsToPoint<dimension>(inputSpatialSize, size, stride);
auto &rb = ruleBooks[p];
if (rb.empty()) {
......@@ -577,7 +578,7 @@ at::Tensor vvl2t(std::vector<std::vector<long>> v) {
template <Int dimension>
std::vector<at::Tensor>
Metadata<dimension>::compareSparseHelper(Metadata<dimension> &mR,
/* long */ at::Tensor spatialSize) {
/* long */ at::Tensor &spatialSize) {
auto p = LongTensorToPoint<dimension>(spatialSize);
auto &sgsL = grids[p];
auto &sgsR = mR.grids[p];
......@@ -624,7 +625,7 @@ at::Tensor vvl2t_(std::vector<std::vector<Int>> v) {
template <Int dimension>
at::Tensor
Metadata<dimension>::copyFeaturesHelper(Metadata<dimension> &mR,
/* long */ at::Tensor spatialSize) {
/* long */ at::Tensor &spatialSize) {
auto p = LongTensorToPoint<dimension>(spatialSize);
auto &sgsL = grids[p];
auto &sgsR = mR.grids[p];
......
......@@ -38,7 +38,7 @@ template <Int dimension>
void addPointToSparseGridMapAndFeatures(SparseGridMap<dimension> &mp,
Point<dimension> p, Int &nActive,
long nPlanes,
/*float*/ at::Tensor features,
/*float*/ at::Tensor &features,
float *vec, bool overwrite);
template <Int dimension> class Metadata {
......@@ -81,45 +81,47 @@ public:
Metadata();
void clear();
Int getNActive(/*long*/ at::Tensor spatialSize);
SparseGrids<dimension> &getSparseGrid(/*long*/ at::Tensor spatialSize);
void setInputSpatialSize(/*long*/ at::Tensor spatialSize);
Int getNActive(/*long*/ at::Tensor &spatialSize);
SparseGrids<dimension> &getSparseGrid(/*long*/ at::Tensor &spatialSize);
void setInputSpatialSize(/*long*/ at::Tensor &spatialSize);
void batchAddSample();
void setInputSpatialLocation(/*float*/ at::Tensor features,
/*long*/ at::Tensor location,
/*float*/ at::Tensor vec, bool overwrite);
void setInputSpatialLocations(/*float*/ at::Tensor features,
/*long*/ at::Tensor locations,
/*float*/ at::Tensor vecs, bool overwrite);
void setInputSpatialLocation(/*float*/ at::Tensor &features,
/*long*/ at::Tensor &location,
/*float*/ at::Tensor &vec, bool overwrite);
void setInputSpatialLocations(/*float*/ at::Tensor &features,
/*long*/ at::Tensor &locations,
/*float*/ at::Tensor &vecs, bool overwrite);
at::Tensor getSpatialLocations(/*long*/ at::Tensor spatialSize);
void createMetadataForDenseToSparse(/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor nz_, long batchSize);
at::Tensor getSpatialLocations(/*long*/ at::Tensor &spatialSize);
void createMetadataForDenseToSparse(/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &nz_, long batchSize);
void sparsifyMetadata(Metadata<dimension> &mOut,
/*long*/ at::Tensor spatialSize,
/*byte*/ at::Tensor filter,
/*long*/ at::Tensor cuSum);
/*long*/ at::Tensor &spatialSize,
/*byte*/ at::Tensor &filter,
/*long*/ at::Tensor &cuSum);
void appendMetadata(Metadata<dimension> &mAdd,
/*long*/ at::Tensor spatialSize);
/*long*/ at::Tensor &spatialSize);
/* std::vector<at::Tensor> sparsifyCompare(Metadata<dimension> &mReference, */
/* std::vector<at::Tensor &> sparsifyCompare(Metadata<dimension> &mReference,
*/
/* Metadata<dimension> &mSparsified,
*/
/* /\*long*\/ at::Tensor spatialSize);
/* /\*long*\/ at::Tensor &
* spatialSize);
*/
std::vector<at::Tensor> sparsifyCompare(Metadata<dimension> &mReference,
/*long*/ at::Tensor spatialSize);
/*long*/ at::Tensor &spatialSize);
// tensor is size[0] x .. x size[dimension-1] x size[dimension]
// size[0] x .. x size[dimension-1] == spatial volume
// size[dimension] == #feature planes
void addSampleFromThresholdedTensor(/*float*/ at::Tensor features_,
/*float*/ at::Tensor tensor_,
/*long*/ at::Tensor offset_,
/*long*/ at::Tensor spatialSize_,
void addSampleFromThresholdedTensor(/*float*/ at::Tensor &features_,
/*float*/ at::Tensor &tensor_,
/*long*/ at::Tensor &offset_,
/*long*/ at::Tensor &spatialSize_,
float threshold);
// 3x3 submanifold convolutions, 3x3/2x2 pooling or strided convolutions
......@@ -128,41 +130,42 @@ public:
// 3x3 submanifold convolutions, 2x2 pooling or strided convolutions
void generateRuleBooks2s2();
void inputLayer(/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor coords, Int batchSize, Int mode);
void blLayer(/*long*/ at::Tensor spatialSize, /*long*/ at::Tensor coords,
void inputLayer(/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &coords, Int batchSize, Int mode);
void blLayer(/*long*/ at::Tensor &spatialSize, /*long*/ at::Tensor &coords,
Int mode);
RuleBook &getSubmanifoldRuleBook(/*long*/ at::Tensor spatialSize,
/*long*/ at::Tensor size, bool openMP);
RuleBook &getPermutohedralSubmanifoldRuleBook(/*long*/ at::Tensor spatialSize,
bool openMP);
RuleBook &getActivePoolingRuleBook(/*long*/ at::Tensor spatialSize);
RuleBook &getSparseToDenseRuleBook(/*long*/ at::Tensor spatialSize,
RuleBook &getSubmanifoldRuleBook(/*long*/ at::Tensor &spatialSize,
/*long*/ at::Tensor &size, bool openMP);
RuleBook &
getPermutohedralSubmanifoldRuleBook(/*long*/ at::Tensor &spatialSize,
bool openMP);
RuleBook &getActivePoolingRuleBook(/*long*/ at::Tensor &spatialSize);
RuleBook &getSparseToDenseRuleBook(/*long*/ at::Tensor &spatialSize,
bool openMP);
RuleBook &getRuleBook(/*long*/ at::Tensor inputSpatialSize,
/*long*/ at::Tensor outputSpatialSize,
/*long*/ at::Tensor size,
/*long*/ at::Tensor stride, bool openMP);
RuleBook &getFullConvolutionRuleBook(/*long*/ at::Tensor inputSpatialSize,
/*long*/ at::Tensor outputSpatialSize,
/*long*/ at::Tensor size,
/*long*/ at::Tensor stride,
RuleBook &getRuleBook(/*long*/ at::Tensor &inputSpatialSize,
/*long*/ at::Tensor &outputSpatialSize,
/*long*/ at::Tensor &size,
/*long*/ at::Tensor &stride, bool openMP);
RuleBook &getFullConvolutionRuleBook(/*long*/ at::Tensor &inputSpatialSize,
/*long*/ at::Tensor &outputSpatialSize,
/*long*/ at::Tensor &size,
/*long*/ at::Tensor &stride,
Metadata<dimension> &newM);
RuleBook &getRandomizedStrideRuleBook(/*long*/ at::Tensor inputSpatialSize,
/*long*/ at::Tensor outputSpatialSize,
/*long*/ at::Tensor size,
/*long*/ at::Tensor stride,
RuleBook &getRandomizedStrideRuleBook(/*long*/ at::Tensor &inputSpatialSize,
/*long*/ at::Tensor &outputSpatialSize,
/*long*/ at::Tensor &size,
/*long*/ at::Tensor &stride,
bool openMP);
std::vector<at::Tensor>
  std::vector<at::Tensor>
compareSparseHelper(Metadata<dimension> &mR,
/* long */ at::Tensor spatialSize);
/* long */ at::Tensor &spatialSize);
at::Tensor copyFeaturesHelper(Metadata<dimension> &mR,
/* long */ at::Tensor spatialSize);
/* long */ at::Tensor &spatialSize);
};
template <typename T> T *OptionalTensorData(at::Tensor tensor);
template <typename T> T *OptionalTensorData(at::Tensor &tensor);
template <Int dimension> Int volume(long *point);
#endif
......@@ -23,12 +23,12 @@
template void ActivePooling_ForwardPass<float>(float *input_features,
float *output_features,
Int batchSize, Int maxActive,
Int nPlanes, Int *rules,
Int nPlanes, const Int *rules,
bool average);
template void ActivePooling_BackwardPass<float>(float *d_input_features,
float *d_output_features,
Int batchSize, Int maxActive,
Int nPlanes, Int *rules,
Int nPlanes, const Int *rules,
bool average);
template void dAffineReluTrivialConvolution_forward<float>(
......
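
The hunk above also const-qualifies the `rules` pointer in the explicit `ActivePooling_ForwardPass` / `ActivePooling_BackwardPass` instantiations, which lets them accept rule data taken from a `const` rule book. A small sketch of that pattern follows; the kernel body and names are illustrative, not the real pooling rules semantics.

```cpp
// Sketch: const-qualifying a read-only rules pointer and instantiating the
// template explicitly, mirroring the declarations in the hunk above.
using Int = int;

template <typename T>
void Pooling_ForwardPass(const T *in, T *out, Int n, const Int *rules) {
  for (Int i = 0; i < n; ++i)
    out[i] = in[rules[i]];  // rules is only read, so const is accurate
}

// Explicit instantiation for float, matching the pattern above.
template void Pooling_ForwardPass<float>(const float *, float *, Int, const Int *);
```
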
......@@ -7,9 +7,9 @@
// Helper function to draw pen strokes with
// nPlanes = 3, feature vector = (1,dx,dy)
void cpu_float_DrawCurve_2(Metadata<2> &m,
/*float*/ at::Tensor features,
/*float*/ at::Tensor stroke) {
at::Tensor location = at::zeros(at::CPU(at::kLong), {2});
/*float*/ at::Tensor &features,
/*float*/ at::Tensor &stroke) {
  at::Tensor location = at::zeros(at::CPU(at::kLong), {2});
auto location_ = location.data<long>();
auto vec = at::zeros(at::CPU(at::kFloat), {3});
......
......@@ -31,168 +31,168 @@ template <Int Dimension> void dimension(py::module &m, const char *name) {
.def("compareSparseHelper", &Metadata<Dimension>::compareSparseHelper)
.def("copyFeaturesHelper", &Metadata<Dimension>::copyFeaturesHelper);
m.def("ActivePooling_updateOutput",
(void (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
(void (*)(at::Tensor&, Metadata<Dimension> &, at::Tensor&, at::Tensor&,
bool)) &
ActivePooling_updateOutput,
"");
m.def("ActivePooling_updateGradInput",
(void (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
at::Tensor, bool)) &
(void (*)(at::Tensor&, Metadata<Dimension> &, at::Tensor&, at::Tensor&,
at::Tensor&, bool)) &
ActivePooling_updateGradInput,
"");
m.def("AveragePooling_updateOutput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, long)) &
AveragePooling_updateOutput,
"");
m.def("AveragePooling_updateGradInput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
long)) &
AveragePooling_updateGradInput,
"");
m.def("Convolution_updateOutput",
(double (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor)) &
(double (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&)) &
Convolution_updateOutput,
"");
m.def("Convolution_backward",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, at::Tensor, at::Tensor)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&)) &
Convolution_backward,
"");
m.def("RandomizedStrideConvolution_updateOutput",
(double (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor)) &
(double (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&)) &
RandomizedStrideConvolution_updateOutput,
"");
m.def("RandomizedStrideConvolution_backward",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, at::Tensor, at::Tensor)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&)) &
RandomizedStrideConvolution_backward,
"");
m.def("Deconvolution_updateOutput",
(double (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor)) &
(double (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&)) &
Deconvolution_updateOutput,
"");
m.def("Deconvolution_backward",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, at::Tensor, at::Tensor)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&)) &
Deconvolution_backward,
"");
m.def("FullConvolution_updateOutput",
(double (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, Metadata<Dimension> &, at::Tensor,
at::Tensor, at::Tensor, at::Tensor)) &
(double (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, Metadata<Dimension> &, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&)) &
FullConvolution_updateOutput,
"");
m.def("FullConvolution_backward",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, Metadata<Dimension> &, at::Tensor,
at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, Metadata<Dimension> &, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&)) &
FullConvolution_backward,
"");
m.def("MaxPooling_updateOutput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, long)) &
MaxPooling_updateOutput,
"");
m.def("MaxPooling_updateGradInput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, long)) &
MaxPooling_updateGradInput,
"");
m.def("RandomizedStrideMaxPooling_updateOutput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, long)) &
RandomizedStrideMaxPooling_updateOutput,
"");
m.def("RandomizedStrideMaxPooling_updateGradInput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, long)) &
RandomizedStrideMaxPooling_updateGradInput,
"");
m.def("SparseToDense_updateOutput",
(void (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
(void (*)(at::Tensor&, Metadata<Dimension> &, at::Tensor&, at::Tensor&,
long)) &
SparseToDense_updateOutput,
"");
m.def("SparseToDense_updateGradInput",
(void (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
at::Tensor)) &
(void (*)(at::Tensor&, Metadata<Dimension> &, at::Tensor&, at::Tensor&,
at::Tensor&)) &
SparseToDense_updateGradInput,
"");
m.def("SubmanifoldConvolution_updateOutput",
(double (*)(at::Tensor, at::Tensor, Metadata<Dimension> &, at::Tensor,
at::Tensor, at::Tensor, at::Tensor)) &
(double (*)(at::Tensor&, at::Tensor&, Metadata<Dimension> &, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&)) &
SubmanifoldConvolution_updateOutput,
"");
m.def("SubmanifoldConvolution_backward",
(void (*)(at::Tensor, at::Tensor, Metadata<Dimension> &, at::Tensor,
at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor)) &
(void (*)(at::Tensor&, at::Tensor&, Metadata<Dimension> &, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&)) &
SubmanifoldConvolution_backward,
"");
m.def("PermutohedralSubmanifoldConvolution_updateOutput",
(double (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
at::Tensor, at::Tensor)) &
(double (*)(at::Tensor&, Metadata<Dimension> &, at::Tensor&, at::Tensor&,
at::Tensor&, at::Tensor&)) &
PermutohedralSubmanifoldConvolution_updateOutput,
"");
m.def("PermutohedralSubmanifoldConvolution_backward",
(void (*)(at::Tensor, Metadata<Dimension> &, at::Tensor, at::Tensor,
at::Tensor, at::Tensor, at::Tensor, at::Tensor)) &
(void (*)(at::Tensor&, Metadata<Dimension> &, at::Tensor&, at::Tensor&,
at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&)) &
PermutohedralSubmanifoldConvolution_backward,
"");
m.def("InputLayer_updateOutput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, long, long)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, long, long)) &
InputLayer_updateOutput,
"");
m.def("InputLayer_updateGradInput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&)) &
InputLayer_updateGradInput,
"");
m.def("OutputLayer_updateOutput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&)) &
OutputLayer_updateOutput,
"");
m.def("OutputLayer_updateGradInput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&)) &
OutputLayer_updateGradInput,
"");
m.def("BLInputLayer_updateOutput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor, at::Tensor,
at::Tensor, long)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&, at::Tensor&,
at::Tensor&, long)) &
BLInputLayer_updateOutput,
"");
m.def("BLInputLayer_updateGradInput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&)) &
BLInputLayer_updateGradInput,
"");
m.def("BLOutputLayer_updateOutput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&)) &
BLOutputLayer_updateOutput,
"");
m.def("BLOutputLayer_updateGradInput",
(void (*)(Metadata<Dimension> &, at::Tensor, at::Tensor)) &
(void (*)(Metadata<Dimension> &, at::Tensor&, at::Tensor&)) &
BLOutputLayer_updateGradInput,
"");
m.def("UnPooling_updateOutput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, long)) &
UnPooling_updateOutput,
"");
m.def("UnPooling_updateGradInput",
(void (*)(at::Tensor, at::Tensor, at::Tensor, at::Tensor,
Metadata<Dimension> &, at::Tensor, at::Tensor, long)) &
(void (*)(at::Tensor&, at::Tensor&, at::Tensor&, at::Tensor&,
Metadata<Dimension> &, at::Tensor&, at::Tensor&, long)) &
UnPooling_updateGradInput,
"");
}
......
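
The binding hunk above keeps pybind11's usual overload-disambiguation idiom: each function pointer is cast to an explicit signature before being handed to `m.def`, and those casts are what change to the new `at::Tensor&` parameter types. A hedged sketch of the idiom, using made-up `Demo_*` functions and a made-up module name rather than this repository's API (it assumes a standard PyTorch C++ extension build, where `torch/extension.h` provides pybind11 and the `at::Tensor` type casters):

```cpp
// Illustrative only: the Demo_* functions and module name are invented.
#include <torch/extension.h>

void Demo_updateOutput(at::Tensor &input, at::Tensor &output) {
  output.resize_as_(input);
}
void Demo_updateOutput(at::Tensor &input, at::Tensor &output, long n) {
  output.resize_({n});
}

PYBIND11_MODULE(demo_bindings, m) {
  // Casting &Demo_updateOutput to an explicit function-pointer type selects
  // one overload, exactly as the (void (*)(...)) casts do in dimension<>().
  m.def("Demo_updateOutput",
        (void (*)(at::Tensor &, at::Tensor &)) &Demo_updateOutput, "");
  m.def("Demo_updateOutput_n",
        (void (*)(at::Tensor &, at::Tensor &, long)) &Demo_updateOutput, "");
}
```
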