Commit 1df7b845 authored by Benjamin Thomas Graham's avatar Benjamin Thomas Graham
Browse files

3d segmentation

parent f2e3800b
// Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
#ifndef TH_GENERIC_FILE_
#define TH_GENERIC_FILE_ "generic/CPU/MaxPooling.cpp"
#else
#include "MaxPooling.h"
extern "C" void scn_DR_(MaxPooling_updateOutput)(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THTensor *input_features,
    THTensor *output_features, long nFeaturesToDrop) {
  // Forward pass of sparse max pooling. The first nFeaturesToDrop input
  // channels are skipped; the remaining channels are pooled per the rule book
  // built for this input/output geometry.
  SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
  uInt pooledPlanes = input_features->size[1] - nFeaturesToDrop;
  auto ruleBooks =
      _m.getRuleBook(inputSize, outputSize, poolSize, poolStride, true);
  uInt activeOut = _m.getNActive(outputSize);
  // One row per active output site, one column per retained channel.
  THTensor_(resize2d)(output_features, activeOut, pooledPlanes);
  THTensor_(zero)(output_features);
  auto inPtr = THTensor_(data)(input_features) + nFeaturesToDrop;
  auto outPtr = THTensor_(data)(output_features);
  for (auto &rules : ruleBooks) {
    // Each rule book stores flat (input row, output row) index pairs.
    uInt pairCount = rules.size() / 2;
    MaxPooling_ForwardPass<real>(inPtr, outPtr, pooledPlanes,
                                 input_features->stride[0],
                                 output_features->stride[0], &rules[0],
                                 pairCount);
  }
}
extern "C" void scn_DR_(MaxPooling_updateGradInput)(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THTensor *input_features,
    THTensor *d_input_features, THTensor *output_features,
    THTensor *d_output_features, long nFeaturesToDrop) {
  // Backward pass of sparse max pooling: routes each output gradient back to
  // the input row whose value produced the forward-pass maximum.
  SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
  uInt nPlanes = input_features->size[1] - nFeaturesToDrop;
  auto _rules =
      _m.getRuleBook(inputSize, outputSize, poolSize, poolStride, true);
  // NOTE(review): return value unused; the call is kept in case getNActive
  // has metadata side effects — confirm before removing.
  uInt nActive = _m.getNActive(outputSize);
  THTensor_(resizeAs)(d_input_features, input_features);
  THTensor_(zero)(d_input_features);
  // BUG FIX: the forward pass pools over input channels starting at
  // nFeaturesToDrop (iF was offset there), so the backward pass must apply
  // the same offset to iF (for the argmax comparison against oF) and to diF
  // (so gradients land in the matching channels). Without the offsets,
  // nFeaturesToDrop > 0 mis-routes gradients. oF/doF need no offset: the
  // output tensors hold only the nPlanes retained channels.
  auto iF = THTensor_(data)(input_features) + nFeaturesToDrop;
  auto oF = THTensor_(data)(output_features);
  auto diF = THTensor_(data)(d_input_features) + nFeaturesToDrop;
  auto doF = THTensor_(data)(d_output_features);
  for (auto &r : _rules) {
    // Rule books store flat (input row, output row) index pairs.
    uInt nHot = r.size() / 2;
    MaxPooling_BackwardPass<real>(iF, diF, oF, doF, nPlanes,
                                  input_features->stride[0],
                                  output_features->stride[0], &r[0], nHot);
  }
}
extern "C" void scn_DR_(RandomizedStrideMaxPooling_updateOutput)(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THTensor *input_features,
    THTensor *output_features, long nFeaturesToDrop) {
  // Forward pass of max pooling with randomized-stride rule books. The first
  // nFeaturesToDrop input channels are skipped.
  SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
  uInt pooledPlanes = input_features->size[1] - nFeaturesToDrop;
  auto ruleBooks = _m.getRandomizedStrideRuleBook(inputSize, outputSize,
                                                  poolSize, poolStride, true);
  uInt activeOut = _m.getNActive(outputSize);
  // One row per active output site, one column per retained channel.
  THTensor_(resize2d)(output_features, activeOut, pooledPlanes);
  THTensor_(zero)(output_features);
  auto inPtr = THTensor_(data)(input_features) + nFeaturesToDrop;
  auto outPtr = THTensor_(data)(output_features);
  for (auto &rules : ruleBooks) {
    // Each rule book stores flat (input row, output row) index pairs.
    uInt pairCount = rules.size() / 2;
    MaxPooling_ForwardPass<real>(inPtr, outPtr, pooledPlanes,
                                 input_features->stride[0],
                                 output_features->stride[0], &rules[0],
                                 pairCount);
  }
}
extern "C" void scn_DR_(RandomizedStrideMaxPooling_updateGradInput)(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THTensor *input_features,
    THTensor *d_input_features, THTensor *output_features,
    THTensor *d_output_features, long nFeaturesToDrop) {
  // Backward pass of randomized-stride max pooling: routes each output
  // gradient back to the input row whose value produced the forward maximum.
  SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
  uInt nPlanes = input_features->size[1] - nFeaturesToDrop;
  auto _rules = _m.getRandomizedStrideRuleBook(inputSize, outputSize, poolSize,
                                               poolStride, true);
  // NOTE(review): return value unused; the call is kept in case getNActive
  // has metadata side effects — confirm before removing.
  uInt nActive = _m.getNActive(outputSize);
  THTensor_(resizeAs)(d_input_features, input_features);
  THTensor_(zero)(d_input_features);
  // BUG FIX: the forward pass reads input channels starting at
  // nFeaturesToDrop, so iF (argmax comparison) and diF (gradient target)
  // must carry the same offset here; otherwise nFeaturesToDrop > 0
  // mis-routes gradients. oF/doF need no offset: the output tensors hold
  // only the nPlanes retained channels.
  auto iF = THTensor_(data)(input_features) + nFeaturesToDrop;
  auto oF = THTensor_(data)(output_features);
  auto diF = THTensor_(data)(d_input_features) + nFeaturesToDrop;
  auto doF = THTensor_(data)(d_output_features);
  for (auto &r : _rules) {
    // Rule books store flat (input row, output row) index pairs.
    uInt nHot = r.size() / 2;
    MaxPooling_BackwardPass<real>(iF, diF, oF, doF, nPlanes,
                                  input_features->stride[0],
                                  output_features->stride[0], &r[0], nHot);
  }
}
#endif
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
extern "C" void scn_DR_(SparseToDense_updateOutput)( extern "C" void scn_DR_(SparseToDense_updateOutput)(
THLongTensor *inputSize, void **m, THTensor *input_features, THLongTensor *inputSize, void **m, THTensor *input_features,
THTensor *output_features, void *rulesBuffer, long nPlanes) { THTensor *output_features, long nPlanes) {
SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m) SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
...@@ -39,8 +39,7 @@ extern "C" void scn_DR_(SparseToDense_updateOutput)( ...@@ -39,8 +39,7 @@ extern "C" void scn_DR_(SparseToDense_updateOutput)(
} }
extern "C" void scn_DR_(SparseToDense_updateGradInput)( extern "C" void scn_DR_(SparseToDense_updateGradInput)(
THLongTensor *inputSize, void **m, THTensor *input_features, THLongTensor *inputSize, void **m, THTensor *input_features,
THTensor *d_input_features, THTensor *d_output_features, THTensor *d_input_features, THTensor *d_output_features) {
void *rulesBuffer) {
SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m) SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
THTensor_(resizeAs)(d_input_features, input_features); THTensor_(resizeAs)(d_input_features, input_features);
......
...@@ -5,19 +5,19 @@ ...@@ -5,19 +5,19 @@
// LICENSE file in the root directory of this source tree. // LICENSE file in the root directory of this source tree.
#ifndef TH_GENERIC_FILE_ #ifndef TH_GENERIC_FILE_
#define TH_GENERIC_FILE_ "generic/CPU/MaxPooling.cpp" #define TH_GENERIC_FILE_ "generic/CPU/UnPooling.cpp"
#else #else
#include "MaxPooling.h" #include "UnPooling.h"
extern "C" void scn_DR_(MaxPooling_updateOutput)( extern "C" void scn_DR_(UnPooling_updateOutput)(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize, THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THTensor *input_features, THLongTensor *poolStride, void **m, THTensor *input_features,
THTensor *output_features, long nFeaturesToDrop, void *rulesBuffer) { THTensor *output_features, long nFeaturesToDrop) {
SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m) SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
uInt nPlanes = input_features->size[1] - nFeaturesToDrop; uInt nPlanes = input_features->size[1] - nFeaturesToDrop;
auto _rules = auto _rules =
_m.getRuleBook(inputSize, outputSize, poolSize, poolStride, true); _m.getRuleBook(outputSize, inputSize, poolSize, poolStride, true);
uInt nActive = _m.getNActive(outputSize); uInt nActive = _m.getNActive(outputSize);
THTensor_(resize2d)(output_features, nActive, THTensor_(resize2d)(output_features, nActive,
input_features->size[1] - nFeaturesToDrop); input_features->size[1] - nFeaturesToDrop);
...@@ -28,34 +28,33 @@ extern "C" void scn_DR_(MaxPooling_updateOutput)( ...@@ -28,34 +28,33 @@ extern "C" void scn_DR_(MaxPooling_updateOutput)(
for (auto &r : _rules) { for (auto &r : _rules) {
uInt nHot = r.size() / 2; uInt nHot = r.size() / 2;
MaxPooling_ForwardPass<real>(iF, oF, nPlanes, input_features->stride[0], UnPooling_ForwardPass<real>(iF, oF, nPlanes, input_features->size[1],
output_features->stride[0], &r[0], nHot); output_features->size[1], &r[0], nHot,
_rules.size());
} }
} }
extern "C" void scn_DR_(MaxPooling_updateGradInput)( extern "C" void scn_DR_(UnPooling_updateGradInput)(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize, THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THTensor *input_features, THLongTensor *poolStride, void **m, THTensor *input_features,
THTensor *d_input_features, THTensor *output_features, THTensor *d_input_features, THTensor *d_output_features,
THTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer) { long nFeaturesToDrop) {
SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m) SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
uInt nPlanes = input_features->size[1] - nFeaturesToDrop; uInt nPlanes = input_features->size[1] - nFeaturesToDrop;
auto _rules = auto _rules =
_m.getRuleBook(inputSize, outputSize, poolSize, poolStride, true); _m.getRuleBook(outputSize, inputSize, poolSize, poolStride, true);
uInt nActive = _m.getNActive(outputSize); uInt nActive = _m.getNActive(outputSize);
THTensor_(resizeAs)(d_input_features, input_features); THTensor_(resizeAs)(d_input_features, input_features);
THTensor_(zero)(d_input_features); THTensor_(zero)(d_input_features);
auto iF = THTensor_(data)(input_features); auto diF = THTensor_(data)(d_input_features) + nFeaturesToDrop;
auto oF = THTensor_(data)(output_features);
auto diF = THTensor_(data)(d_input_features);
auto doF = THTensor_(data)(d_output_features); auto doF = THTensor_(data)(d_output_features);
for (auto &r : _rules) { for (auto &r : _rules) {
uInt nHot = r.size() / 2; uInt nHot = r.size() / 2;
MaxPooling_BackwardPass<real>(iF, diF, oF, doF, nPlanes, UnPooling_BackwardPass<real>(diF, doF, nPlanes, input_features->size[1],
input_features->stride[0], d_output_features->size[1], &r[0], nHot,
output_features->stride[0], &r[0], nHot); _rules.size());
} }
} }
#endif #endif
// Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
#ifndef CPU_UNPOOLING_H
#define CPU_UNPOOLING_H
#include "../SparseConvNet.h"
template <typename T>
void UnPooling_ForwardPass(T *input_features, T *output_features, uInt nPlanes,
uInt input_stride, uInt output_stride, uInt *rules,
uInt nHot, uInt filterVolume) {
for (uInt outSite = 0; outSite < nHot; outSite++) {
uInt i = rules[2 * outSite + 1] * input_stride;
uInt o = rules[2 * outSite] * output_stride;
for (uInt plane = 0; plane < nPlanes; plane++)
output_features[o + plane] += input_features[i + plane];
}
}
template <typename T>
void UnPooling_BackwardPass(T *d_input_features, T *d_output_features,
uInt nPlanes, uInt input_stride, uInt output_stride,
uInt *rules, uInt nHot, uInt filterVolume) {
for (uInt outSite = 0; outSite < nHot; outSite++) {
uInt i = rules[2 * outSite + 1] * input_stride;
uInt o = rules[2 * outSite] * output_stride;
for (uInt plane = 0; plane < nPlanes; plane++)
d_input_features[i + plane] += d_output_features[o + plane];
}
}
#endif /* CPU_UNPOOLING_H */
...@@ -11,7 +11,7 @@ ...@@ -11,7 +11,7 @@
extern "C" void scn_DR_(ActivePooling_updateOutput)( extern "C" void scn_DR_(ActivePooling_updateOutput)(
THLongTensor *inputSize, void **m, THCTensor *input_features, THLongTensor *inputSize, void **m, THCTensor *input_features,
THCTensor *output_features, THCITensor *rulesBuffer, bool average) { THCTensor *output_features, bool average) {
SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m) SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
uInt nPlanes = input_features->size[1]; uInt nPlanes = input_features->size[1];
auto _rules = _m.getActivePoolingRuleBook(inputSize); auto _rules = _m.getActivePoolingRuleBook(inputSize);
...@@ -20,9 +20,10 @@ extern "C" void scn_DR_(ActivePooling_updateOutput)( ...@@ -20,9 +20,10 @@ extern "C" void scn_DR_(ActivePooling_updateOutput)(
THCTensor_(resize2d)(state, output_features, batchSize, nPlanes); THCTensor_(resize2d)(state, output_features, batchSize, nPlanes);
THCTensor_(zero)(state, output_features); THCTensor_(zero)(state, output_features);
if (THCITensor_nElement(state, rulesBuffer) < 1 << 22) auto rulesBuffer = THCITensor_(new)(state);
THCITensor_resize1d(state, rulesBuffer, 1 << 22); if (THCITensor_(nElement)(state, rulesBuffer) < 1 << 22)
uInt *rb = (uInt *)THCITensor_data(state, rulesBuffer); THCITensor_(resize1d)(state, rulesBuffer, 1 << 22);
uInt *rb = (uInt *)THCITensor_(data)(state, rulesBuffer);
uInt rowBatchSize = std::min((uInt)32768, (1 << 22) / (maxActive + 1)); uInt rowBatchSize = std::min((uInt)32768, (1 << 22) / (maxActive + 1));
THAssert(rowBatchSize > 0); THAssert(rowBatchSize > 0);
...@@ -36,11 +37,12 @@ extern "C" void scn_DR_(ActivePooling_updateOutput)( ...@@ -36,11 +37,12 @@ extern "C" void scn_DR_(ActivePooling_updateOutput)(
ActivePooling_ForwardPass<real>(iF, oF + o * nPlanes, batchSize_, maxActive, ActivePooling_ForwardPass<real>(iF, oF + o * nPlanes, batchSize_, maxActive,
nPlanes, rb, average); nPlanes, rb, average);
} }
THCITensor_(free)(state, rulesBuffer);
} }
extern "C" void scn_DR_(ActivePooling_updateGradInput)( extern "C" void scn_DR_(ActivePooling_updateGradInput)(
THLongTensor *inputSize, void **m, THCTensor *input_features, THLongTensor *inputSize, void **m, THCTensor *input_features,
THCTensor *d_input_features, THCTensor *d_output_features, THCTensor *d_input_features, THCTensor *d_output_features,
THCITensor *rulesBuffer, bool average) { bool average) {
SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m) SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
uInt nPlanes = input_features->size[1]; uInt nPlanes = input_features->size[1];
auto _rules = _m.getActivePoolingRuleBook(inputSize); auto _rules = _m.getActivePoolingRuleBook(inputSize);
...@@ -49,9 +51,10 @@ extern "C" void scn_DR_(ActivePooling_updateGradInput)( ...@@ -49,9 +51,10 @@ extern "C" void scn_DR_(ActivePooling_updateGradInput)(
THCTensor_(resizeAs)(state, d_input_features, input_features); THCTensor_(resizeAs)(state, d_input_features, input_features);
THCTensor_(zero)(state, d_input_features); THCTensor_(zero)(state, d_input_features);
if (THCITensor_nElement(state, rulesBuffer) < 1 << 22) auto rulesBuffer = THCITensor_(new)(state);
THCITensor_resize1d(state, rulesBuffer, 1 << 22); if (THCITensor_(nElement)(state, rulesBuffer) < 1 << 22)
uInt *rb = (uInt *)THCITensor_data(state, rulesBuffer); THCITensor_(resize1d)(state, rulesBuffer, 1 << 22);
uInt *rb = (uInt *)THCITensor_(data)(state, rulesBuffer);
uInt rowBatchSize = std::min((uInt)32768, (1 << 22) / (maxActive + 1)); uInt rowBatchSize = std::min((uInt)32768, (1 << 22) / (maxActive + 1));
THAssert(rowBatchSize > 0); THAssert(rowBatchSize > 0);
...@@ -65,5 +68,6 @@ extern "C" void scn_DR_(ActivePooling_updateGradInput)( ...@@ -65,5 +68,6 @@ extern "C" void scn_DR_(ActivePooling_updateGradInput)(
ActivePooling_BackwardPass<real>(diF, doF + o * nPlanes, batchSize_, ActivePooling_BackwardPass<real>(diF, doF + o * nPlanes, batchSize_,
maxActive, nPlanes, rb, average); maxActive, nPlanes, rb, average);
} }
THCITensor_(free)(state, rulesBuffer);
} }
#endif #endif
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
extern "C" void scn_DR_(AveragePooling_updateOutput)( extern "C" void scn_DR_(AveragePooling_updateOutput)(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize, THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THCTensor *input_features, THLongTensor *poolStride, void **m, THCTensor *input_features,
THCTensor *output_features, long nFeaturesToDrop, THCITensor *rulesBuffer) { THCTensor *output_features, long nFeaturesToDrop) {
SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m) SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
uInt nPlanes = input_features->size[1] - nFeaturesToDrop; uInt nPlanes = input_features->size[1] - nFeaturesToDrop;
...@@ -37,7 +37,7 @@ extern "C" void scn_DR_(AveragePooling_updateGradInput)( ...@@ -37,7 +37,7 @@ extern "C" void scn_DR_(AveragePooling_updateGradInput)(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize, THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THCTensor *input_features, THLongTensor *poolStride, void **m, THCTensor *input_features,
THCTensor *d_input_features, THCTensor *d_output_features, THCTensor *d_input_features, THCTensor *d_output_features,
long nFeaturesToDrop, THCITensor *rulesBuffer) { long nFeaturesToDrop) {
SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m) SCN_INITIALIZE_AND_REFERENCE(Metadata<Dimension>, m)
uInt nPlanes = input_features->size[1] - nFeaturesToDrop; uInt nPlanes = input_features->size[1] - nFeaturesToDrop;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment