Commit 66986767 authored by Benjamin Thomas Graham's avatar Benjamin Thomas Graham
Browse files

fixes

parent edf89af3
......@@ -17,14 +17,14 @@ void InputLayer_ForwardPass(T *input_features, T *output_features, Int nRows,
for (row = 0; row < nRows; row++) {
auto nActive = rules[0];
T multiplier = (average and nActive > 0) ? (T)1 / nActive : (T)1;
auto out_f = output_features + row * nPlanes;
auto r = rules + row * (1 + maxActive);
for (Int i = 1; i <= nActive; ++i) {
auto in_f = input_features + nPlanes * rules[i];
auto in_f = input_features + r[i] * nPlanes;
for (Int plane = 0; plane < nPlanes; plane++) {
output_features[plane] += multiplier * in_f[plane];
out_f[plane] += multiplier * in_f[plane];
}
}
output_features += nPlanes;
rules += 1 + maxActive;
}
}
template <typename T>
......@@ -36,13 +36,13 @@ void InputLayer_BackwardPass(T *d_input_features, T *d_output_features,
for (row = 0; row < nRows; row++) {
auto nActive = rules[0];
T multiplier = (average and nActive > 0) ? (T)1 / nActive : (T)1;
auto d_out_f = d_output_features + row * nPlanes;
auto r = rules + row * (1 + maxActive);
for (Int i = 1; i <= nActive; ++i) {
auto d_in_f = d_input_features + nPlanes * rules[i];
auto d_in_f = d_input_features + r[i] * nPlanes;
for (Int plane = 0; plane < nPlanes; plane++)
d_in_f[plane] += multiplier * d_output_features[plane];
d_in_f[plane] += multiplier * d_out_f[plane];
}
d_output_features += nPlanes;
rules += 1 + maxActive;
}
}
......
......@@ -5,10 +5,9 @@
// LICENSE file in the root directory of this source tree.
template <typename T>
void Convolution_fp_bias(T *of, T *b, Int nPlanes, Int nActiveOut);
void Convolution_fp_bias(T *oF, T *b, Int nPlanes, Int nActive);
template <typename T>
void Convolution_bp_bias(T *matrix, T *target, Int nRows, Int nColumns,
Int nCOLUMNS);
void Convolution_bp_bias(T *d_oF, T *d_b, Int nPlanes, Int nActive);
template <typename T>
double dConvolution_forward2(T *inFeatures, T *outFeatures, T *w,
RuleBook _rules, Int input_nPlanes,
......@@ -84,7 +83,7 @@ void cuda_Convolution_backward(
if (d_bias.numel()) {
auto db = d_bias.data<T>();
Convolution_bp_bias(doF, db, op, op, nActiveOut);
Convolution_bp_bias(doF, db, op, nActiveOut);
}
}
}
......@@ -147,7 +146,7 @@ void cuda_SubmanifoldConvolution_backward(
if (d_bias.numel()) {
auto db = d_bias.data<T>();
Convolution_bp_bias(doF, db, op, op, nActive);
Convolution_bp_bias(doF, db, op, nActive);
}
}
}
......@@ -216,7 +215,7 @@ void cuda_FullConvolution_backward(
if (d_bias.numel()) {
auto db = d_bias.data<T>();
Convolution_bp_bias(doF, db, op, op, nActiveOut);
Convolution_bp_bias(doF, db, op, nActiveOut);
}
}
}
......@@ -283,7 +282,7 @@ void cuda_RandomizedStrideConvolution_backward(
if (d_bias.numel()) {
auto db = d_bias.data<T>();
Convolution_bp_bias(doF, db, op, op, nActiveOut);
Convolution_bp_bias(doF, db, op, nActiveOut);
}
}
}
This diff is collapsed.
......@@ -78,7 +78,7 @@ void cuda_Deconvolution_backward(
dDeconvolution_backward_dW2<T>(iF, diF, doF, w, dw, _rules, ip, ip, op, op);
if (d_bias.numel()) {
auto db = d_bias.data<T>();
Convolution_bp_bias(doF, db, op, op, nActiveOut);
Convolution_bp_bias(doF, db, op, nActiveOut);
}
}
}
This diff is collapsed.
......@@ -15,15 +15,15 @@
#include "CUDA/UnPooling.cu"
template void ActivePooling_ForwardPass<float>(float *input_features,
float *output_features,
Int batchSize, Int maxActive,
Int nPlanes, Int *rules,
bool average);
float *output_features,
Int batchSize, Int maxActive,
Int nPlanes, Int *rules,
bool average);
template void ActivePooling_BackwardPass<float>(float *d_input_features,
float *d_output_features,
Int batchSize, Int maxActive,
Int nPlanes, Int *rules,
bool average);
float *d_output_features,
Int batchSize, Int maxActive,
Int nPlanes, Int *rules,
bool average);
template void dAffineReluTrivialConvolution_forward<float>(
float *inFeatures, float *outFeatures, float *affineWeight,
......@@ -43,10 +43,10 @@ template void cuda_AveragePooling_BackwardPass<float>(
float *d_input_features, float *d_output_features, Int nPlanes,
Int input_stride, Int output_stride, RuleBook _rules, Int filterVolume);
template void Convolution_fp_bias<float>(float *of, float *b, Int op,
Int nActive);
template void Convolution_bp_bias<float>(float *matrix, float *target,
Int nRows, Int nColumns, Int nCOLUMNS);
template void Convolution_fp_bias<float>(float *oF, float *b, Int nPlanes,
Int nActive);
template void Convolution_bp_bias<float>(float *d_oF, float *d_b,
Int nPlanes, Int nActive);
template double dConvolution_forward2<float>(
float *inFeatures, float *outFeatures, float *w, RuleBook _rules,
Int input_nPlanes, Int input_stride, Int output_nPlanes, Int output_stride);
......@@ -66,65 +66,65 @@ template void dDeconvolution_backward_dW2<float>(
Int output_nPlanes, Int output_stride);
template void InputLayer_fp<float>(float *input_features,
float *output_features, Int nRows,
Int maxActive, Int nPlanes, Int *rules_cpu,
Int *rules_gpu, bool average);
float *output_features, Int nRows,
Int maxActive, Int nPlanes, Int *rules_cpu,
Int *rules_gpu, bool average);
template void InputLayer_bp<float>(float *d_input_features,
float *d_output_features, Int nRows,
Int maxActive, Int nPlanes, Int *rules_cpu,
Int *rules_gpu, bool average);
float *d_output_features, Int nRows,
Int maxActive, Int nPlanes, Int *rules_cpu,
Int *rules_gpu, bool average);
template void LeakyReLU_fp<float>(float *input_features, float *output_features,
Int n, float alpha);
Int n, float alpha);
template void LeakyReLU_bp<float>(float *input_features,
float *d_input_features,
float *output_features, Int n, float alpha);
float *d_input_features,
float *output_features, Int n, float alpha);
template void cuda_MaxPooling_ForwardPass<float>(float *input_features,
float *output_features,
Int nPlanes, Int input_stride,
Int output_stride,
RuleBook _rules);
float *output_features,
Int nPlanes, Int input_stride,
Int output_stride,
RuleBook _rules);
template void cuda_MaxPooling_BackwardPass<float>(
float *input_features, float *d_input_features, float *output_features,
float *d_output_features, Int nPlanes, Int input_stride, Int output_stride,
RuleBook _rules);
template void cuda_SparseToDense_ForwardPass<float>(float *input_features,
float *output_features,
Int nPlanes,
Int spatialVolume,
RuleBook _rules);
float *output_features,
Int nPlanes,
Int spatialVolume,
RuleBook _rules);
template void cuda_SparseToDense_BackwardPass<float>(float *d_input_features,
float *d_output_features,
Int nPlanes,
Int spatialVolume,
RuleBook _rules);
float *d_output_features,
Int nPlanes,
Int spatialVolume,
RuleBook _rules);
template void cuda_UnPooling_ForwardPass<float>(float *input_features,
float *output_features,
Int nPlanes, Int input_stride,
Int output_stride,
RuleBook _rules);
float *output_features,
Int nPlanes, Int input_stride,
Int output_stride,
RuleBook _rules);
template void cuda_UnPooling_BackwardPass<float>(float *d_input_features,
float *d_output_features,
Int nPlanes, Int input_stride,
Int output_stride,
RuleBook _rules);
float *d_output_features,
Int nPlanes, Int input_stride,
Int output_stride,
RuleBook _rules);
template void bn_f<float>(float *iF, float *oF, Int nPlanes, Int input_stride,
Int output_stride, Int nActive, float *saveMean,
float *saveInvStd, float *runningMean,
float *runningVar, float *weight, float *bias,
float eps, float momentum, bool train,
float leakiness);
Int output_stride, Int nActive, float *saveMean,
float *saveInvStd, float *runningMean,
float *runningVar, float *weight, float *bias,
float eps, float momentum, bool train,
float leakiness);
template void bn_b<float>(float *input_features, float *d_input_features,
float *output_features, float *d_output_features,
Int nPlanes, Int input_stride, Int output_stride,
Int nActive, float *saveMean, float *saveInvStd,
float *runningMean, float *runningVar, float *weight,
float *bias, float *d_weight, float *d_bias,
float leakiness);
float *output_features, float *d_output_features,
Int nPlanes, Int input_stride, Int output_stride,
Int nActive, float *saveMean, float *saveInvStd,
float *runningMean, float *runningVar, float *weight,
float *bias, float *d_weight, float *d_bias,
float leakiness);
template void bmd_f<float>(float *input_features, float *output_features,
float *noise, Int nActive, Int nPlanes, float alpha);
float *noise, Int nActive, Int nPlanes, float alpha);
template void bmd_b<float>(float *input_features, float *d_input_features,
float *d_output_features, float *noise, Int nActive,
Int nPlanes, float alpha);
float *d_output_features, float *noise, Int nActive,
Int nPlanes, float alpha);
......@@ -6,7 +6,7 @@
forward_pass_multiplyAdd_count = 0
forward_pass_hidden_states = 0
from .activations import Tanh, Sigmoid, ReLU, ELU, SELU, BatchNormELU
from .activations import Tanh, Sigmoid, ReLU, LeakyReLU, ELU, SELU, BatchNormELU
from .averagePooling import AveragePooling
from .batchNormalization import BatchNormalization, BatchNormReLU, BatchNormLeakyReLU
from .classificationTrainValidate import ClassificationTrainValidate
......
......@@ -22,6 +22,18 @@ class Sigmoid(Module):
return output
class LeakyReLU(Module):
def __init__(self,leak=1/3):
Module.__init__(self)
self.leak=leak
def forward(self, input):
output = SparseConvNetTensor()
output.features = F.leaky_relu(input.features,self.leak)
output.metadata = input.metadata
output.spatial_size = input.spatial_size
return output
class Tanh(Module):
def forward(self, input):
output = SparseConvNetTensor()
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment