// Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
// Handle and utility entry points.
// `m` is a slot holding an opaque handle managed by the library
// (presumably the per-dimension metadata object — see scn_<D>_freeMetadata
// below; confirm against the implementation).
// NOTE(review): only a 2-D drawCurve variant exists; no scn_1_/scn_3_ etc.
void scn_2_drawCurve(void **m, THFloatTensor *features, THFloatTensor *stroke);
// Round-trip an opaque pointer through a `long` slot (used by the FFI layer).
// NOTE(review): storing a pointer in `long` is not portable to LLP64 targets
// (64-bit Windows, where long is 32 bits); intptr_t would be safer — confirm
// the FFI binding's expectations before changing the type.
long scn_readPtr(void **ptr);
void scn_writePtr(long p, void **ptr);
double scn_ruleBookBits(void);
// Dimension-specialized metadata API: scn_<D>_* operates on D-dimensional
// sparse inputs (D = 1..10). Every dimension exposes the same set of
// signatures; `m` is a slot holding the opaque metadata handle, released
// via scn_<D>_freeMetadata.
// D = 1
double scn_1_addSampleFromThresholdedTensor(void **m, THFloatTensor *features_,
THFloatTensor *tensor_,
THLongTensor *offset_,
THLongTensor *spatialSize_,
float threshold);
void scn_1_batchAddSample(void **m);
void scn_1_createMetadataForDenseToSparse(void **m, THLongTensor *spatialSize_,
THLongTensor *pad, THLongTensor *nz,
long batchSize);
void scn_1_freeMetadata(void **metadata);
void scn_1_generateRuleBooks3s2(void **m);
void scn_1_generateRuleBooks2s2(void **m);
void scn_1_setInputSpatialSize(void **m, THLongTensor *spatialSize);
void scn_1_setInputSpatialLocation(void **m, THFloatTensor *features,
THLongTensor *location, THFloatTensor *vec,
_Bool overwrite);
// D = 2
double scn_2_addSampleFromThresholdedTensor(void **m, THFloatTensor *features_,
THFloatTensor *tensor_,
THLongTensor *offset_,
THLongTensor *spatialSize_,
float threshold);
void scn_2_batchAddSample(void **m);
void scn_2_createMetadataForDenseToSparse(void **m, THLongTensor *spatialSize_,
THLongTensor *pad, THLongTensor *nz,
long batchSize);
void scn_2_freeMetadata(void **metadata);
void scn_2_generateRuleBooks3s2(void **m);
void scn_2_generateRuleBooks2s2(void **m);
void scn_2_setInputSpatialSize(void **m, THLongTensor *spatialSize);
void scn_2_setInputSpatialLocation(void **m, THFloatTensor *features,
THLongTensor *location, THFloatTensor *vec,
_Bool overwrite);
// D = 3
double scn_3_addSampleFromThresholdedTensor(void **m, THFloatTensor *features_,
THFloatTensor *tensor_,
THLongTensor *offset_,
THLongTensor *spatialSize_,
float threshold);
void scn_3_batchAddSample(void **m);
void scn_3_createMetadataForDenseToSparse(void **m, THLongTensor *spatialSize_,
THLongTensor *pad, THLongTensor *nz,
long batchSize);
void scn_3_freeMetadata(void **metadata);
void scn_3_generateRuleBooks3s2(void **m);
void scn_3_generateRuleBooks2s2(void **m);
void scn_3_setInputSpatialSize(void **m, THLongTensor *spatialSize);
void scn_3_setInputSpatialLocation(void **m, THFloatTensor *features,
THLongTensor *location, THFloatTensor *vec,
_Bool overwrite);
// D = 4
double scn_4_addSampleFromThresholdedTensor(void **m, THFloatTensor *features_,
THFloatTensor *tensor_,
THLongTensor *offset_,
THLongTensor *spatialSize_,
float threshold);
void scn_4_batchAddSample(void **m);
void scn_4_createMetadataForDenseToSparse(void **m, THLongTensor *spatialSize_,
THLongTensor *pad, THLongTensor *nz,
long batchSize);
void scn_4_freeMetadata(void **metadata);
void scn_4_generateRuleBooks3s2(void **m);
void scn_4_generateRuleBooks2s2(void **m);
void scn_4_setInputSpatialSize(void **m, THLongTensor *spatialSize);
void scn_4_setInputSpatialLocation(void **m, THFloatTensor *features,
THLongTensor *location, THFloatTensor *vec,
_Bool overwrite);
// D = 5
double scn_5_addSampleFromThresholdedTensor(void **m, THFloatTensor *features_,
THFloatTensor *tensor_,
THLongTensor *offset_,
THLongTensor *spatialSize_,
float threshold);
void scn_5_batchAddSample(void **m);
void scn_5_createMetadataForDenseToSparse(void **m, THLongTensor *spatialSize_,
THLongTensor *pad, THLongTensor *nz,
long batchSize);
void scn_5_freeMetadata(void **metadata);
void scn_5_generateRuleBooks3s2(void **m);
void scn_5_generateRuleBooks2s2(void **m);
void scn_5_setInputSpatialSize(void **m, THLongTensor *spatialSize);
void scn_5_setInputSpatialLocation(void **m, THFloatTensor *features,
THLongTensor *location, THFloatTensor *vec,
_Bool overwrite);
// D = 6
double scn_6_addSampleFromThresholdedTensor(void **m, THFloatTensor *features_,
THFloatTensor *tensor_,
THLongTensor *offset_,
THLongTensor *spatialSize_,
float threshold);
void scn_6_batchAddSample(void **m);
void scn_6_createMetadataForDenseToSparse(void **m, THLongTensor *spatialSize_,
THLongTensor *pad, THLongTensor *nz,
long batchSize);
void scn_6_freeMetadata(void **metadata);
void scn_6_generateRuleBooks3s2(void **m);
void scn_6_generateRuleBooks2s2(void **m);
void scn_6_setInputSpatialSize(void **m, THLongTensor *spatialSize);
void scn_6_setInputSpatialLocation(void **m, THFloatTensor *features,
THLongTensor *location, THFloatTensor *vec,
_Bool overwrite);
// D = 7
double scn_7_addSampleFromThresholdedTensor(void **m, THFloatTensor *features_,
THFloatTensor *tensor_,
THLongTensor *offset_,
THLongTensor *spatialSize_,
float threshold);
void scn_7_batchAddSample(void **m);
void scn_7_createMetadataForDenseToSparse(void **m, THLongTensor *spatialSize_,
THLongTensor *pad, THLongTensor *nz,
long batchSize);
void scn_7_freeMetadata(void **metadata);
void scn_7_generateRuleBooks3s2(void **m);
void scn_7_generateRuleBooks2s2(void **m);
void scn_7_setInputSpatialSize(void **m, THLongTensor *spatialSize);
void scn_7_setInputSpatialLocation(void **m, THFloatTensor *features,
THLongTensor *location, THFloatTensor *vec,
_Bool overwrite);
// D = 8
double scn_8_addSampleFromThresholdedTensor(void **m, THFloatTensor *features_,
THFloatTensor *tensor_,
THLongTensor *offset_,
THLongTensor *spatialSize_,
float threshold);
void scn_8_batchAddSample(void **m);
void scn_8_createMetadataForDenseToSparse(void **m, THLongTensor *spatialSize_,
THLongTensor *pad, THLongTensor *nz,
long batchSize);
void scn_8_freeMetadata(void **metadata);
void scn_8_generateRuleBooks3s2(void **m);
void scn_8_generateRuleBooks2s2(void **m);
void scn_8_setInputSpatialSize(void **m, THLongTensor *spatialSize);
void scn_8_setInputSpatialLocation(void **m, THFloatTensor *features,
THLongTensor *location, THFloatTensor *vec,
_Bool overwrite);
// D = 9
double scn_9_addSampleFromThresholdedTensor(void **m, THFloatTensor *features_,
THFloatTensor *tensor_,
THLongTensor *offset_,
THLongTensor *spatialSize_,
float threshold);
void scn_9_batchAddSample(void **m);
void scn_9_createMetadataForDenseToSparse(void **m, THLongTensor *spatialSize_,
THLongTensor *pad, THLongTensor *nz,
long batchSize);
void scn_9_freeMetadata(void **metadata);
void scn_9_generateRuleBooks3s2(void **m);
void scn_9_generateRuleBooks2s2(void **m);
void scn_9_setInputSpatialSize(void **m, THLongTensor *spatialSize);
void scn_9_setInputSpatialLocation(void **m, THFloatTensor *features,
THLongTensor *location, THFloatTensor *vec,
_Bool overwrite);
// D = 10
double scn_10_addSampleFromThresholdedTensor(void **m, THFloatTensor *features_,
THFloatTensor *tensor_,
THLongTensor *offset_,
THLongTensor *spatialSize_,
float threshold);
void scn_10_batchAddSample(void **m);
void scn_10_createMetadataForDenseToSparse(void **m, THLongTensor *spatialSize_,
THLongTensor *pad, THLongTensor *nz,
long batchSize);
void scn_10_freeMetadata(void **metadata);
void scn_10_generateRuleBooks3s2(void **m);
void scn_10_generateRuleBooks2s2(void **m);
void scn_10_setInputSpatialSize(void **m, THLongTensor *spatialSize);
void scn_10_setInputSpatialLocation(void **m, THFloatTensor *features,
THLongTensor *location, THFloatTensor *vec,
_Bool overwrite);
// Dimension-independent CPU layer kernels, single precision (THFloatTensor).
// AffineReluTrivialConvolution
void scn_cpu_float_AffineReluTrivialConvolution_updateOutput(
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *affineWeight, THFloatTensor *affineBias,
THFloatTensor *convWeight);
void scn_cpu_float_AffineReluTrivialConvolution_backward(
THFloatTensor *input_features, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, THFloatTensor *affineWeight,
THFloatTensor *d_affineWeight, THFloatTensor *affineBias,
THFloatTensor *d_affineBias, THFloatTensor *convWeight,
THFloatTensor *d_convWeight, _Bool additiveGrad);
// BatchwiseMultiplicativeDropout
void scn_cpu_float_BatchwiseMultiplicativeDropout_updateOutput(
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *noise, long nPlanes, long input_stride, long output_stride,
float alpha);
void scn_cpu_float_BatchwiseMultiplicativeDropout_updateGradInput(
THFloatTensor *input_features, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, THFloatTensor *noise, long nPlanes,
long input_stride, long output_stride, float alpha);
// BatchNormalization
void scn_cpu_float_BatchNormalization_updateOutput(
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *saveMean, THFloatTensor *saveInvStd,
THFloatTensor *runningMean, THFloatTensor *runningVar,
THFloatTensor *weight, THFloatTensor *bias, float eps, float momentum,
_Bool train, float leakiness);
void scn_cpu_float_BatchNormalization_backward(
THFloatTensor *input_features, THFloatTensor *d_input_features,
THFloatTensor *output_features, THFloatTensor *d_output_features,
THFloatTensor *saveMean, THFloatTensor *saveInvStd,
THFloatTensor *runningMean, THFloatTensor *runningVar,
THFloatTensor *weight, THFloatTensor *bias, THFloatTensor *d_weight,
THFloatTensor *d_bias, float leakiness);
// BatchNormalizationInTensor
void scn_cpu_float_BatchNormalizationInTensor_updateOutput(
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *saveMean, THFloatTensor *saveInvStd,
THFloatTensor *runningMean, THFloatTensor *runningVar,
THFloatTensor *weight, THFloatTensor *bias, float eps, float momentum,
_Bool train, float leakiness);
// LeakyReLU
void scn_cpu_float_LeakyReLU_updateOutput(THFloatTensor *input_features,
THFloatTensor *output_features,
long n, float alpha);
void scn_cpu_float_LeakyReLU_updateGradInput(THFloatTensor *input_features,
THFloatTensor *d_input_features,
THFloatTensor *d_output_features,
long n, float alpha);
// NetworkInNetwork
double scn_cpu_float_NetworkInNetwork_updateOutput(
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *weight, THFloatTensor *bias);
void scn_cpu_float_NetworkInNetwork_updateGradInput(
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight);
void scn_cpu_float_NetworkInNetwork_accGradParameters(
THFloatTensor *input_features, THFloatTensor *d_output_features,
THFloatTensor *d_weight, THFloatTensor *d_bias);
// Dimension-independent CPU layer kernels, double precision (THDoubleTensor).
// Mirrors the scn_cpu_float_* set above.
// AffineReluTrivialConvolution
void scn_cpu_double_AffineReluTrivialConvolution_updateOutput(
THDoubleTensor *input_features, THDoubleTensor *output_features,
THDoubleTensor *affineWeight, THDoubleTensor *affineBias,
THDoubleTensor *convWeight);
void scn_cpu_double_AffineReluTrivialConvolution_backward(
THDoubleTensor *input_features, THDoubleTensor *d_input_features,
THDoubleTensor *d_output_features, THDoubleTensor *affineWeight,
THDoubleTensor *d_affineWeight, THDoubleTensor *affineBias,
THDoubleTensor *d_affineBias, THDoubleTensor *convWeight,
THDoubleTensor *d_convWeight, _Bool additiveGrad);
// BatchwiseMultiplicativeDropout
void scn_cpu_double_BatchwiseMultiplicativeDropout_updateOutput(
THDoubleTensor *input_features, THDoubleTensor *output_features,
THDoubleTensor *noise, long nPlanes, long input_stride, long output_stride,
float alpha);
void scn_cpu_double_BatchwiseMultiplicativeDropout_updateGradInput(
THDoubleTensor *input_features, THDoubleTensor *d_input_features,
THDoubleTensor *d_output_features, THDoubleTensor *noise, long nPlanes,
long input_stride, long output_stride, float alpha);
// BatchNormalization
void scn_cpu_double_BatchNormalization_updateOutput(
THDoubleTensor *input_features, THDoubleTensor *output_features,
THDoubleTensor *saveMean, THDoubleTensor *saveInvStd,
THDoubleTensor *runningMean, THDoubleTensor *runningVar,
THDoubleTensor *weight, THDoubleTensor *bias, double eps, double momentum,
_Bool train, double leakiness);
void scn_cpu_double_BatchNormalization_backward(
THDoubleTensor *input_features, THDoubleTensor *d_input_features,
THDoubleTensor *output_features, THDoubleTensor *d_output_features,
THDoubleTensor *saveMean, THDoubleTensor *saveInvStd,
THDoubleTensor *runningMean, THDoubleTensor *runningVar,
THDoubleTensor *weight, THDoubleTensor *bias, THDoubleTensor *d_weight,
THDoubleTensor *d_bias, double leakiness);
// BatchNormalizationInTensor
void scn_cpu_double_BatchNormalizationInTensor_updateOutput(
THDoubleTensor *input_features, THDoubleTensor *output_features,
THDoubleTensor *saveMean, THDoubleTensor *saveInvStd,
THDoubleTensor *runningMean, THDoubleTensor *runningVar,
THDoubleTensor *weight, THDoubleTensor *bias, double eps, double momentum,
_Bool train, double leakiness);
// LeakyReLU
// NOTE(review): the double variants still take `float alpha` while the
// BatchNormalization double variants take `double eps/momentum/leakiness` —
// possibly intentional, but confirm against the definitions before relying
// on full double precision here.
void scn_cpu_double_LeakyReLU_updateOutput(THDoubleTensor *input_features,
THDoubleTensor *output_features,
long n, float alpha);
void scn_cpu_double_LeakyReLU_updateGradInput(THDoubleTensor *input_features,
THDoubleTensor *d_input_features,
THDoubleTensor *d_output_features,
long n, float alpha);
// NetworkInNetwork
double scn_cpu_double_NetworkInNetwork_updateOutput(
THDoubleTensor *input_features, THDoubleTensor *output_features,
THDoubleTensor *weight, THDoubleTensor *bias);
void scn_cpu_double_NetworkInNetwork_updateGradInput(
THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
THDoubleTensor *weight);
void scn_cpu_double_NetworkInNetwork_accGradParameters(
THDoubleTensor *input_features, THDoubleTensor *d_output_features,
THDoubleTensor *d_weight, THDoubleTensor *d_bias);
// Spatial operators, single precision, dimension 1.
// ActivePooling
void scn_cpu_float1ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer, _Bool average);
void scn_cpu_float1ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling
void scn_cpu_float1AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float1AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
long nFeaturesToDrop, void *rulesBuffer);
// Convolution
double scn_cpu_float1Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float1Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Deconvolution
double scn_cpu_float1Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float1Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Max Pooling
void scn_cpu_float1MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float1MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *output_features,
THFloatTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense
void scn_cpu_float1SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer);
void scn_cpu_float1SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
void *rulesBuffer);
// ValidConvolution
double scn_cpu_float1ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *weight, THFloatTensor *bias, long filterVolume,
void *rulesBuffer);
void scn_cpu_float1ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, THFloatTensor *weight,
THFloatTensor *d_weight, THFloatTensor *d_bias, long filterVolume,
void *rulesBuffer);
// Spatial operators, single precision, dimension 2.
// ActivePooling
void scn_cpu_float2ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer, _Bool average);
void scn_cpu_float2ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling
void scn_cpu_float2AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float2AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
long nFeaturesToDrop, void *rulesBuffer);
// Convolution
double scn_cpu_float2Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float2Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Deconvolution
double scn_cpu_float2Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float2Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Max Pooling
void scn_cpu_float2MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float2MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *output_features,
THFloatTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense
void scn_cpu_float2SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer);
void scn_cpu_float2SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
void *rulesBuffer);
// ValidConvolution
double scn_cpu_float2ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *weight, THFloatTensor *bias, long filterVolume,
void *rulesBuffer);
void scn_cpu_float2ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, THFloatTensor *weight,
THFloatTensor *d_weight, THFloatTensor *d_bias, long filterVolume,
void *rulesBuffer);
// Spatial operators, single precision, dimension 3.
// ActivePooling
void scn_cpu_float3ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer, _Bool average);
void scn_cpu_float3ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling
void scn_cpu_float3AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float3AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
long nFeaturesToDrop, void *rulesBuffer);
// Convolution
double scn_cpu_float3Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float3Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Deconvolution
double scn_cpu_float3Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float3Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Max Pooling
void scn_cpu_float3MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float3MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *output_features,
THFloatTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense
void scn_cpu_float3SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer);
void scn_cpu_float3SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
void *rulesBuffer);
// ValidConvolution
double scn_cpu_float3ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *weight, THFloatTensor *bias, long filterVolume,
void *rulesBuffer);
void scn_cpu_float3ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, THFloatTensor *weight,
THFloatTensor *d_weight, THFloatTensor *d_bias, long filterVolume,
void *rulesBuffer);
// Spatial operators, single precision, dimension 4.
// ActivePooling
void scn_cpu_float4ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer, _Bool average);
void scn_cpu_float4ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling
void scn_cpu_float4AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float4AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
long nFeaturesToDrop, void *rulesBuffer);
// Convolution
double scn_cpu_float4Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float4Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Deconvolution
double scn_cpu_float4Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float4Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Max Pooling
void scn_cpu_float4MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float4MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *output_features,
THFloatTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense
void scn_cpu_float4SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer);
void scn_cpu_float4SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
void *rulesBuffer);
// ValidConvolution
double scn_cpu_float4ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *weight, THFloatTensor *bias, long filterVolume,
void *rulesBuffer);
void scn_cpu_float4ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, THFloatTensor *weight,
THFloatTensor *d_weight, THFloatTensor *d_bias, long filterVolume,
void *rulesBuffer);
// Spatial operators, single precision, dimension 5.
// ActivePooling
void scn_cpu_float5ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer, _Bool average);
void scn_cpu_float5ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling
void scn_cpu_float5AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float5AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
long nFeaturesToDrop, void *rulesBuffer);
// Convolution
double scn_cpu_float5Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float5Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Deconvolution
double scn_cpu_float5Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float5Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Max Pooling
void scn_cpu_float5MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float5MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *output_features,
THFloatTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense
void scn_cpu_float5SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer);
void scn_cpu_float5SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
void *rulesBuffer);
// ValidConvolution
double scn_cpu_float5ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *weight, THFloatTensor *bias, long filterVolume,
void *rulesBuffer);
void scn_cpu_float5ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, THFloatTensor *weight,
THFloatTensor *d_weight, THFloatTensor *d_bias, long filterVolume,
void *rulesBuffer);
// ActivePooling (float, 6 spatial dimensions)
void scn_cpu_float6ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer, _Bool average);
void scn_cpu_float6ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling (float, 6 spatial dimensions)
void scn_cpu_float6AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float6AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
long nFeaturesToDrop, void *rulesBuffer);
double scn_cpu_float6Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float6Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
double scn_cpu_float6Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float6Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Max Pooling (float, 6 spatial dimensions)
void scn_cpu_float6MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float6MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *output_features,
THFloatTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense (float, 6 spatial dimensions)
void scn_cpu_float6SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer);
void scn_cpu_float6SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
void *rulesBuffer);
double scn_cpu_float6ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *weight, THFloatTensor *bias, long filterVolume,
void *rulesBuffer);
void scn_cpu_float6ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, THFloatTensor *weight,
THFloatTensor *d_weight, THFloatTensor *d_bias, long filterVolume,
void *rulesBuffer);
// ActivePooling (float, 7 spatial dimensions)
void scn_cpu_float7ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer, _Bool average);
void scn_cpu_float7ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling (float, 7 spatial dimensions)
void scn_cpu_float7AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float7AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
long nFeaturesToDrop, void *rulesBuffer);
double scn_cpu_float7Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float7Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
double scn_cpu_float7Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float7Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Max Pooling (float, 7 spatial dimensions)
void scn_cpu_float7MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float7MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *output_features,
THFloatTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense (float, 7 spatial dimensions)
void scn_cpu_float7SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer);
void scn_cpu_float7SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
void *rulesBuffer);
double scn_cpu_float7ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *weight, THFloatTensor *bias, long filterVolume,
void *rulesBuffer);
void scn_cpu_float7ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, THFloatTensor *weight,
THFloatTensor *d_weight, THFloatTensor *d_bias, long filterVolume,
void *rulesBuffer);
// ActivePooling (float, 8 spatial dimensions)
void scn_cpu_float8ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer, _Bool average);
void scn_cpu_float8ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling (float, 8 spatial dimensions)
void scn_cpu_float8AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float8AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
long nFeaturesToDrop, void *rulesBuffer);
double scn_cpu_float8Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float8Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
double scn_cpu_float8Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float8Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Max Pooling (float, 8 spatial dimensions)
void scn_cpu_float8MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float8MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *output_features,
THFloatTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense (float, 8 spatial dimensions)
void scn_cpu_float8SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer);
void scn_cpu_float8SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
void *rulesBuffer);
double scn_cpu_float8ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *weight, THFloatTensor *bias, long filterVolume,
void *rulesBuffer);
void scn_cpu_float8ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, THFloatTensor *weight,
THFloatTensor *d_weight, THFloatTensor *d_bias, long filterVolume,
void *rulesBuffer);
// ActivePooling (float, 9 spatial dimensions)
void scn_cpu_float9ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer, _Bool average);
void scn_cpu_float9ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling (float, 9 spatial dimensions)
void scn_cpu_float9AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float9AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
long nFeaturesToDrop, void *rulesBuffer);
double scn_cpu_float9Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float9Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
double scn_cpu_float9Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float9Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Max Pooling (float, 9 spatial dimensions)
void scn_cpu_float9MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float9MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *output_features,
THFloatTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense (float, 9 spatial dimensions)
void scn_cpu_float9SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer);
void scn_cpu_float9SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
void *rulesBuffer);
double scn_cpu_float9ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *weight, THFloatTensor *bias, long filterVolume,
void *rulesBuffer);
void scn_cpu_float9ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, THFloatTensor *weight,
THFloatTensor *d_weight, THFloatTensor *d_bias, long filterVolume,
void *rulesBuffer);
// ActivePooling (float, 10 spatial dimensions)
void scn_cpu_float10ActivePooling_updateOutput(
THLongTensor *inputSize, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, void *rulesBuffer, _Bool average);
void scn_cpu_float10ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling (float, 10 spatial dimensions)
void scn_cpu_float10AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float10AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
long nFeaturesToDrop, void *rulesBuffer);
double scn_cpu_float10Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float10Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
double scn_cpu_float10Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, THFloatTensor *weight, THFloatTensor *bias,
long filterVolume, void *rulesBuffer);
void scn_cpu_float10Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
THFloatTensor *weight, THFloatTensor *d_weight, THFloatTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Max Pooling (float, 10 spatial dimensions)
void scn_cpu_float10MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_float10MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *output_features,
THFloatTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense (float, 10 spatial dimensions)
void scn_cpu_float10SparseToDense_updateOutput(THLongTensor *inputSize,
void **m,
THFloatTensor *input_features,
THFloatTensor *output_features,
void *rulesBuffer);
void scn_cpu_float10SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THFloatTensor *input_features,
THFloatTensor *d_input_features, THFloatTensor *d_output_features,
void *rulesBuffer);
double scn_cpu_float10ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *output_features,
THFloatTensor *weight, THFloatTensor *bias, long filterVolume,
void *rulesBuffer);
void scn_cpu_float10ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THFloatTensor *input_features, THFloatTensor *d_input_features,
THFloatTensor *d_output_features, THFloatTensor *weight,
THFloatTensor *d_weight, THFloatTensor *d_bias, long filterVolume,
void *rulesBuffer);
// ActivePooling (double, 1 spatial dimension)
void scn_cpu_double1ActivePooling_updateOutput(
THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, void *rulesBuffer, _Bool average);
void scn_cpu_double1ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THDoubleTensor *d_input_features,
THDoubleTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling (double, 1 spatial dimension)
void scn_cpu_double1AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double1AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
long nFeaturesToDrop, void *rulesBuffer);
double scn_cpu_double1Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, THDoubleTensor *weight,
THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double1Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
long filterVolume, void *rulesBuffer);
double scn_cpu_double1Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, THDoubleTensor *weight,
THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double1Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Max Pooling (double, 1 spatial dimension)
void scn_cpu_double1MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double1MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *output_features,
THDoubleTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense (double, 1 spatial dimension)
void scn_cpu_double1SparseToDense_updateOutput(THLongTensor *inputSize,
void **m,
THDoubleTensor *input_features,
THDoubleTensor *output_features,
void *rulesBuffer);
void scn_cpu_double1SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
void *rulesBuffer);
double scn_cpu_double1ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THDoubleTensor *input_features, THDoubleTensor *output_features,
THDoubleTensor *weight, THDoubleTensor *bias, long filterVolume,
void *rulesBuffer);
void scn_cpu_double1ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THDoubleTensor *input_features, THDoubleTensor *d_input_features,
THDoubleTensor *d_output_features, THDoubleTensor *weight,
THDoubleTensor *d_weight, THDoubleTensor *d_bias, long filterVolume,
void *rulesBuffer);
// ActivePooling (double, 2 spatial dimensions)
void scn_cpu_double2ActivePooling_updateOutput(
THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, void *rulesBuffer, _Bool average);
void scn_cpu_double2ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THDoubleTensor *d_input_features,
THDoubleTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling (double, 2 spatial dimensions)
void scn_cpu_double2AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double2AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
long nFeaturesToDrop, void *rulesBuffer);
double scn_cpu_double2Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, THDoubleTensor *weight,
THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double2Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
long filterVolume, void *rulesBuffer);
double scn_cpu_double2Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, THDoubleTensor *weight,
THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double2Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Max Pooling (double, 2 spatial dimensions)
void scn_cpu_double2MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double2MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *output_features,
THDoubleTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense (double, 2 spatial dimensions)
void scn_cpu_double2SparseToDense_updateOutput(THLongTensor *inputSize,
void **m,
THDoubleTensor *input_features,
THDoubleTensor *output_features,
void *rulesBuffer);
void scn_cpu_double2SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
void *rulesBuffer);
double scn_cpu_double2ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THDoubleTensor *input_features, THDoubleTensor *output_features,
THDoubleTensor *weight, THDoubleTensor *bias, long filterVolume,
void *rulesBuffer);
void scn_cpu_double2ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THDoubleTensor *input_features, THDoubleTensor *d_input_features,
THDoubleTensor *d_output_features, THDoubleTensor *weight,
THDoubleTensor *d_weight, THDoubleTensor *d_bias, long filterVolume,
void *rulesBuffer);
// ActivePooling (double, 3 spatial dimensions)
void scn_cpu_double3ActivePooling_updateOutput(
THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, void *rulesBuffer, _Bool average);
void scn_cpu_double3ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THDoubleTensor *d_input_features,
THDoubleTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling (double, 3 spatial dimensions)
void scn_cpu_double3AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double3AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
long nFeaturesToDrop, void *rulesBuffer);
double scn_cpu_double3Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, THDoubleTensor *weight,
THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double3Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
long filterVolume, void *rulesBuffer);
double scn_cpu_double3Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, THDoubleTensor *weight,
THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double3Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
long filterVolume, void *rulesBuffer);
// Max Pooling (double, 3 spatial dimensions)
void scn_cpu_double3MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double3MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *output_features,
THDoubleTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense (double, 3 spatial dimensions)
void scn_cpu_double3SparseToDense_updateOutput(THLongTensor *inputSize,
void **m,
THDoubleTensor *input_features,
THDoubleTensor *output_features,
void *rulesBuffer);
void scn_cpu_double3SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
void *rulesBuffer);
double scn_cpu_double3ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THDoubleTensor *input_features, THDoubleTensor *output_features,
THDoubleTensor *weight, THDoubleTensor *bias, long filterVolume,
void *rulesBuffer);
void scn_cpu_double3ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THDoubleTensor *input_features, THDoubleTensor *d_input_features,
THDoubleTensor *d_output_features, THDoubleTensor *weight,
THDoubleTensor *d_weight, THDoubleTensor *d_bias, long filterVolume,
void *rulesBuffer);
// NOTE(review): "double4" presumably means double precision over 4 spatial
// dimensions -- confirm against the script that generates this header.
// ActivePooling
void scn_cpu_double4ActivePooling_updateOutput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, void *rulesBuffer, _Bool average);
void scn_cpu_double4ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling
void scn_cpu_double4AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double4AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    long nFeaturesToDrop, void *rulesBuffer);
// Convolution (section comment added for consistency with the other groups)
double scn_cpu_double4Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double4Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Deconvolution
double scn_cpu_double4Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double4Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Max Pooling
void scn_cpu_double4MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double4MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *output_features,
    THDoubleTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense
void scn_cpu_double4SparseToDense_updateOutput(THLongTensor *inputSize,
                                               void **m,
                                               THDoubleTensor *input_features,
                                               THDoubleTensor *output_features,
                                               void *rulesBuffer);
void scn_cpu_double4SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    void *rulesBuffer);
// Valid Convolution
double scn_cpu_double4ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *output_features,
    THDoubleTensor *weight, THDoubleTensor *bias, long filterVolume,
    void *rulesBuffer);
void scn_cpu_double4ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, THDoubleTensor *weight,
    THDoubleTensor *d_weight, THDoubleTensor *d_bias, long filterVolume,
    void *rulesBuffer);
// NOTE(review): "double5" presumably means double precision over 5 spatial
// dimensions -- confirm against the script that generates this header.
// ActivePooling
void scn_cpu_double5ActivePooling_updateOutput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, void *rulesBuffer, _Bool average);
void scn_cpu_double5ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling
void scn_cpu_double5AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double5AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    long nFeaturesToDrop, void *rulesBuffer);
// Convolution (section comment added for consistency with the other groups)
double scn_cpu_double5Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double5Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Deconvolution
double scn_cpu_double5Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double5Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Max Pooling
void scn_cpu_double5MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double5MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *output_features,
    THDoubleTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense
void scn_cpu_double5SparseToDense_updateOutput(THLongTensor *inputSize,
                                               void **m,
                                               THDoubleTensor *input_features,
                                               THDoubleTensor *output_features,
                                               void *rulesBuffer);
void scn_cpu_double5SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    void *rulesBuffer);
// Valid Convolution
double scn_cpu_double5ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *output_features,
    THDoubleTensor *weight, THDoubleTensor *bias, long filterVolume,
    void *rulesBuffer);
void scn_cpu_double5ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, THDoubleTensor *weight,
    THDoubleTensor *d_weight, THDoubleTensor *d_bias, long filterVolume,
    void *rulesBuffer);
// NOTE(review): "double6" presumably means double precision over 6 spatial
// dimensions -- confirm against the script that generates this header.
// ActivePooling
void scn_cpu_double6ActivePooling_updateOutput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, void *rulesBuffer, _Bool average);
void scn_cpu_double6ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling
void scn_cpu_double6AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double6AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    long nFeaturesToDrop, void *rulesBuffer);
// Convolution (section comment added for consistency with the other groups)
double scn_cpu_double6Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double6Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Deconvolution
double scn_cpu_double6Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double6Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Max Pooling
void scn_cpu_double6MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double6MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *output_features,
    THDoubleTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense
void scn_cpu_double6SparseToDense_updateOutput(THLongTensor *inputSize,
                                               void **m,
                                               THDoubleTensor *input_features,
                                               THDoubleTensor *output_features,
                                               void *rulesBuffer);
void scn_cpu_double6SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    void *rulesBuffer);
// Valid Convolution
double scn_cpu_double6ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *output_features,
    THDoubleTensor *weight, THDoubleTensor *bias, long filterVolume,
    void *rulesBuffer);
void scn_cpu_double6ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, THDoubleTensor *weight,
    THDoubleTensor *d_weight, THDoubleTensor *d_bias, long filterVolume,
    void *rulesBuffer);
// NOTE(review): "double7" presumably means double precision over 7 spatial
// dimensions -- confirm against the script that generates this header.
// ActivePooling
void scn_cpu_double7ActivePooling_updateOutput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, void *rulesBuffer, _Bool average);
void scn_cpu_double7ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling
void scn_cpu_double7AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double7AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    long nFeaturesToDrop, void *rulesBuffer);
// Convolution (section comment added for consistency with the other groups)
double scn_cpu_double7Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double7Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Deconvolution
double scn_cpu_double7Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double7Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Max Pooling
void scn_cpu_double7MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double7MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *output_features,
    THDoubleTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense
void scn_cpu_double7SparseToDense_updateOutput(THLongTensor *inputSize,
                                               void **m,
                                               THDoubleTensor *input_features,
                                               THDoubleTensor *output_features,
                                               void *rulesBuffer);
void scn_cpu_double7SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    void *rulesBuffer);
// Valid Convolution
double scn_cpu_double7ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *output_features,
    THDoubleTensor *weight, THDoubleTensor *bias, long filterVolume,
    void *rulesBuffer);
void scn_cpu_double7ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, THDoubleTensor *weight,
    THDoubleTensor *d_weight, THDoubleTensor *d_bias, long filterVolume,
    void *rulesBuffer);
// NOTE(review): "double8" presumably means double precision over 8 spatial
// dimensions -- confirm against the script that generates this header.
// ActivePooling
void scn_cpu_double8ActivePooling_updateOutput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, void *rulesBuffer, _Bool average);
void scn_cpu_double8ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling
void scn_cpu_double8AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double8AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    long nFeaturesToDrop, void *rulesBuffer);
// Convolution (section comment added for consistency with the other groups)
double scn_cpu_double8Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double8Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Deconvolution
double scn_cpu_double8Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double8Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Max Pooling
void scn_cpu_double8MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double8MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *output_features,
    THDoubleTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense
void scn_cpu_double8SparseToDense_updateOutput(THLongTensor *inputSize,
                                               void **m,
                                               THDoubleTensor *input_features,
                                               THDoubleTensor *output_features,
                                               void *rulesBuffer);
void scn_cpu_double8SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    void *rulesBuffer);
// Valid Convolution
double scn_cpu_double8ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *output_features,
    THDoubleTensor *weight, THDoubleTensor *bias, long filterVolume,
    void *rulesBuffer);
void scn_cpu_double8ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, THDoubleTensor *weight,
    THDoubleTensor *d_weight, THDoubleTensor *d_bias, long filterVolume,
    void *rulesBuffer);
// NOTE(review): "double9" presumably means double precision over 9 spatial
// dimensions -- confirm against the script that generates this header.
// ActivePooling
void scn_cpu_double9ActivePooling_updateOutput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, void *rulesBuffer, _Bool average);
void scn_cpu_double9ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling
void scn_cpu_double9AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double9AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    long nFeaturesToDrop, void *rulesBuffer);
// Convolution (section comment added for consistency with the other groups)
double scn_cpu_double9Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double9Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Deconvolution
double scn_cpu_double9Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double9Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Max Pooling
void scn_cpu_double9MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double9MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *output_features,
    THDoubleTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense
void scn_cpu_double9SparseToDense_updateOutput(THLongTensor *inputSize,
                                               void **m,
                                               THDoubleTensor *input_features,
                                               THDoubleTensor *output_features,
                                               void *rulesBuffer);
void scn_cpu_double9SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    void *rulesBuffer);
// Valid Convolution
double scn_cpu_double9ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *output_features,
    THDoubleTensor *weight, THDoubleTensor *bias, long filterVolume,
    void *rulesBuffer);
void scn_cpu_double9ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, THDoubleTensor *weight,
    THDoubleTensor *d_weight, THDoubleTensor *d_bias, long filterVolume,
    void *rulesBuffer);
// NOTE(review): "double10" presumably means double precision over 10 spatial
// dimensions -- confirm against the script that generates this header.
// ActivePooling
void scn_cpu_double10ActivePooling_updateOutput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, void *rulesBuffer, _Bool average);
void scn_cpu_double10ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, void *rulesBuffer, _Bool average);
// Average Pooling
void scn_cpu_double10AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double10AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    long nFeaturesToDrop, void *rulesBuffer);
// Convolution (section comment added for consistency with the other groups)
double scn_cpu_double10Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double10Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Deconvolution
double scn_cpu_double10Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, THDoubleTensor *weight,
    THDoubleTensor *bias, long filterVolume, void *rulesBuffer);
void scn_cpu_double10Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    THDoubleTensor *weight, THDoubleTensor *d_weight, THDoubleTensor *d_bias,
    long filterVolume, void *rulesBuffer);
// Max Pooling
void scn_cpu_double10MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *output_features, long nFeaturesToDrop, void *rulesBuffer);
void scn_cpu_double10MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *output_features,
    THDoubleTensor *d_output_features, long nFeaturesToDrop, void *rulesBuffer);
// SparseToDense
void scn_cpu_double10SparseToDense_updateOutput(THLongTensor *inputSize,
                                                void **m,
                                                THDoubleTensor *input_features,
                                                THDoubleTensor *output_features,
                                                void *rulesBuffer);
void scn_cpu_double10SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THDoubleTensor *input_features,
    THDoubleTensor *d_input_features, THDoubleTensor *d_output_features,
    void *rulesBuffer);
// Valid Convolution
double scn_cpu_double10ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *output_features,
    THDoubleTensor *weight, THDoubleTensor *bias, long filterVolume,
    void *rulesBuffer);
void scn_cpu_double10ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THDoubleTensor *input_features, THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features, THDoubleTensor *weight,
    THDoubleTensor *d_weight, THDoubleTensor *d_bias, long filterVolume,
    void *rulesBuffer);
// NOTE(review): the repeated license banner below appears to mark the point
// where a second generated header (GPU/CUDA single-precision declarations,
// THCudaTensor-based) was concatenated after the CPU declarations above.
// Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
// AffineReluTrivialConvolution (section comment added for consistency with
// the other operator sections below)
void scn_gpu_float_AffineReluTrivialConvolution_updateOutput(
    THCudaTensor *input_features, THCudaTensor *output_features,
    THCudaTensor *affineWeight, THCudaTensor *affineBias,
    THCudaTensor *convWeight);
void scn_gpu_float_AffineReluTrivialConvolution_backward(
    THCudaTensor *input_features, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaTensor *affineWeight,
    THCudaTensor *d_affineWeight, THCudaTensor *affineBias,
    THCudaTensor *d_affineBias, THCudaTensor *convWeight,
    THCudaTensor *d_convWeight, _Bool additiveGrad);
// BatchwiseMultiplicativeDropout
void scn_gpu_float_BatchwiseMultiplicativeDropout_updateOutput(
    THCudaTensor *input_features, THCudaTensor *output_features,
    THCudaTensor *noise, long nPlanes, long input_stride, long output_stride,
    float alpha);
void scn_gpu_float_BatchwiseMultiplicativeDropout_updateGradInput(
    THCudaTensor *input_features, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaTensor *noise, long nPlanes,
    long input_stride, long output_stride, float alpha);
// BatchNormalization
void scn_gpu_float_BatchNormalization_updateOutput(
    THCudaTensor *input_features, THCudaTensor *output_features,
    THCudaTensor *saveMean, THCudaTensor *saveInvStd, THCudaTensor *runningMean,
    THCudaTensor *runningVar, THCudaTensor *weight, THCudaTensor *bias,
    float eps, float momentum, _Bool train, float leakiness);
void scn_gpu_float_BatchNormalization_backward(
    THCudaTensor *input_features, THCudaTensor *d_input_features,
    THCudaTensor *output_features, THCudaTensor *d_output_features,
    THCudaTensor *saveMean, THCudaTensor *saveInvStd, THCudaTensor *runningMean,
    THCudaTensor *runningVar, THCudaTensor *weight, THCudaTensor *bias,
    THCudaTensor *d_weight, THCudaTensor *d_bias, float leakiness);
// BatchNormalizationInTensor
void scn_gpu_float_BatchNormalizationInTensor_updateOutput(
    THCudaTensor *input_features, THCudaTensor *output_features,
    THCudaTensor *saveMean, THCudaTensor *saveInvStd, THCudaTensor *runningMean,
    THCudaTensor *runningVar, THCudaTensor *weight, THCudaTensor *bias,
    float eps, float momentum, _Bool train, float leakiness);
// LeakyReLU
void scn_gpu_float_LeakyReLU_updateOutput(THCudaTensor *input_features,
                                          THCudaTensor *output_features, long n,
                                          float alpha);
void scn_gpu_float_LeakyReLU_updateGradInput(THCudaTensor *input_features,
                                             THCudaTensor *d_input_features,
                                             THCudaTensor *d_output_features,
                                             long n, float alpha);
// NetworkInNetwork
double scn_gpu_float_NetworkInNetwork_updateOutput(
    THCudaTensor *input_features, THCudaTensor *output_features,
    THCudaTensor *weight, THCudaTensor *bias);
void scn_gpu_float_NetworkInNetwork_updateGradInput(
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight);
void scn_gpu_float_NetworkInNetwork_accGradParameters(
    THCudaTensor *input_features, THCudaTensor *d_output_features,
    THCudaTensor *d_weight, THCudaTensor *d_bias);
// NOTE(review): "float1" presumably means single precision over 1 spatial
// dimension; unlike the CPU declarations above, these take the rule book as a
// THCudaIntTensor rather than a raw void* buffer -- confirm with generator.
// ActivePooling
void scn_gpu_float1ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer,
                                              _Bool average);
void scn_gpu_float1ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaIntTensor *rulesBuffer,
    _Bool average);
// Average Pooling
void scn_gpu_float1AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float1AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    long nFeaturesToDrop, THCudaIntTensor *rulesBuffer);
// Convolution (section comment added for consistency with the other groups)
double scn_gpu_float1Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float1Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Deconvolution
double scn_gpu_float1Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float1Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Max Pooling
void scn_gpu_float1MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float1MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *output_features,
    THCudaTensor *d_output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
// SparseToDense
void scn_gpu_float1SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer);
void scn_gpu_float1SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THCudaTensor *input_features,
THCudaTensor *d_input_features, THCudaTensor *d_output_features,
THCudaIntTensor *rulesBuffer);
double scn_gpu_float1ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THCudaTensor *input_features, THCudaTensor *output_features,
THCudaTensor *weight, THCudaTensor *bias, long filterVolume,
THCudaIntTensor *rulesBuffer);
void scn_gpu_float1ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THCudaTensor *input_features, THCudaTensor *d_input_features,
THCudaTensor *d_output_features, THCudaTensor *weight,
THCudaTensor *d_weight, THCudaTensor *d_bias, long filterVolume,
THCudaIntTensor *rulesBuffer);
// ---------------------------------------------------------------------------
// float2 group: second instantiation of the dimension-templated GPU float
// API; signatures are identical to the float1 group above except for the
// numeric suffix. `void **m` is an opaque metadata handle
// (cf. scn_*_freeMetadata).
// ---------------------------------------------------------------------------
// ActivePooling
void scn_gpu_float2ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer,
                                              _Bool average);
void scn_gpu_float2ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaIntTensor *rulesBuffer,
    _Bool average);
// Average Pooling
void scn_gpu_float2AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float2AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    long nFeaturesToDrop, THCudaIntTensor *rulesBuffer);
// Convolution (strided)
double scn_gpu_float2Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float2Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Deconvolution (same parameter list as Convolution)
double scn_gpu_float2Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float2Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Max Pooling
void scn_gpu_float2MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float2MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *output_features,
    THCudaTensor *d_output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
// SparseToDense
void scn_gpu_float2SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer);
void scn_gpu_float2SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaIntTensor *rulesBuffer);
// ValidConvolution (no outputSize/filterStride parameters)
double scn_gpu_float2ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *output_features,
    THCudaTensor *weight, THCudaTensor *bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float2ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaTensor *weight,
    THCudaTensor *d_weight, THCudaTensor *d_bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
// ---------------------------------------------------------------------------
// float3 group: third instantiation of the dimension-templated GPU float
// API; signatures are identical to the float1 group except for the numeric
// suffix. `void **m` is an opaque metadata handle (cf. scn_*_freeMetadata).
// ---------------------------------------------------------------------------
// ActivePooling
void scn_gpu_float3ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer,
                                              _Bool average);
void scn_gpu_float3ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaIntTensor *rulesBuffer,
    _Bool average);
// Average Pooling
void scn_gpu_float3AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float3AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    long nFeaturesToDrop, THCudaIntTensor *rulesBuffer);
// Convolution (strided)
double scn_gpu_float3Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float3Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Deconvolution (same parameter list as Convolution)
double scn_gpu_float3Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float3Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Max Pooling
void scn_gpu_float3MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float3MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *output_features,
    THCudaTensor *d_output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
// SparseToDense
void scn_gpu_float3SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer);
void scn_gpu_float3SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaIntTensor *rulesBuffer);
// ValidConvolution (no outputSize/filterStride parameters)
double scn_gpu_float3ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *output_features,
    THCudaTensor *weight, THCudaTensor *bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float3ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaTensor *weight,
    THCudaTensor *d_weight, THCudaTensor *d_bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
// ---------------------------------------------------------------------------
// float4 group: fourth instantiation of the dimension-templated GPU float
// API; signatures are identical to the float1 group except for the numeric
// suffix. `void **m` is an opaque metadata handle (cf. scn_*_freeMetadata).
// ---------------------------------------------------------------------------
// ActivePooling
void scn_gpu_float4ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer,
                                              _Bool average);
void scn_gpu_float4ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaIntTensor *rulesBuffer,
    _Bool average);
// Average Pooling
void scn_gpu_float4AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float4AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    long nFeaturesToDrop, THCudaIntTensor *rulesBuffer);
// Convolution (strided)
double scn_gpu_float4Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float4Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Deconvolution (same parameter list as Convolution)
double scn_gpu_float4Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float4Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Max Pooling
void scn_gpu_float4MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float4MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *output_features,
    THCudaTensor *d_output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
// SparseToDense
void scn_gpu_float4SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer);
void scn_gpu_float4SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaIntTensor *rulesBuffer);
// ValidConvolution (no outputSize/filterStride parameters)
double scn_gpu_float4ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *output_features,
    THCudaTensor *weight, THCudaTensor *bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float4ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaTensor *weight,
    THCudaTensor *d_weight, THCudaTensor *d_bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
// ---------------------------------------------------------------------------
// float5 group: fifth instantiation of the dimension-templated GPU float
// API; signatures are identical to the float1 group except for the numeric
// suffix. `void **m` is an opaque metadata handle (cf. scn_*_freeMetadata).
// ---------------------------------------------------------------------------
// ActivePooling
void scn_gpu_float5ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer,
                                              _Bool average);
void scn_gpu_float5ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaIntTensor *rulesBuffer,
    _Bool average);
// Average Pooling
void scn_gpu_float5AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float5AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    long nFeaturesToDrop, THCudaIntTensor *rulesBuffer);
// Convolution (strided)
double scn_gpu_float5Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float5Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Deconvolution (same parameter list as Convolution)
double scn_gpu_float5Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float5Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Max Pooling
void scn_gpu_float5MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float5MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *output_features,
    THCudaTensor *d_output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
// SparseToDense
void scn_gpu_float5SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer);
void scn_gpu_float5SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaIntTensor *rulesBuffer);
// ValidConvolution (no outputSize/filterStride parameters)
double scn_gpu_float5ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *output_features,
    THCudaTensor *weight, THCudaTensor *bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float5ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaTensor *weight,
    THCudaTensor *d_weight, THCudaTensor *d_bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
// ---------------------------------------------------------------------------
// float6 group: sixth instantiation of the dimension-templated GPU float
// API; signatures are identical to the float1 group except for the numeric
// suffix. `void **m` is an opaque metadata handle (cf. scn_*_freeMetadata).
// ---------------------------------------------------------------------------
// ActivePooling
void scn_gpu_float6ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer,
                                              _Bool average);
void scn_gpu_float6ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaIntTensor *rulesBuffer,
    _Bool average);
// Average Pooling
void scn_gpu_float6AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float6AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    long nFeaturesToDrop, THCudaIntTensor *rulesBuffer);
// Convolution (strided)
double scn_gpu_float6Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float6Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Deconvolution (same parameter list as Convolution)
double scn_gpu_float6Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float6Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Max Pooling
void scn_gpu_float6MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float6MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *output_features,
    THCudaTensor *d_output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
// SparseToDense
void scn_gpu_float6SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer);
void scn_gpu_float6SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaIntTensor *rulesBuffer);
// ValidConvolution (no outputSize/filterStride parameters)
double scn_gpu_float6ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *output_features,
    THCudaTensor *weight, THCudaTensor *bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float6ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaTensor *weight,
    THCudaTensor *d_weight, THCudaTensor *d_bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
// ---------------------------------------------------------------------------
// float7 group: seventh instantiation of the dimension-templated GPU float
// API; signatures are identical to the float1 group except for the numeric
// suffix. `void **m` is an opaque metadata handle (cf. scn_*_freeMetadata).
// ---------------------------------------------------------------------------
// ActivePooling
void scn_gpu_float7ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer,
                                              _Bool average);
void scn_gpu_float7ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaIntTensor *rulesBuffer,
    _Bool average);
// Average Pooling
void scn_gpu_float7AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float7AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    long nFeaturesToDrop, THCudaIntTensor *rulesBuffer);
// Convolution (strided)
double scn_gpu_float7Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float7Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Deconvolution (same parameter list as Convolution)
double scn_gpu_float7Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float7Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Max Pooling
void scn_gpu_float7MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float7MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *output_features,
    THCudaTensor *d_output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
// SparseToDense
void scn_gpu_float7SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer);
void scn_gpu_float7SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaIntTensor *rulesBuffer);
// ValidConvolution (no outputSize/filterStride parameters)
double scn_gpu_float7ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *output_features,
    THCudaTensor *weight, THCudaTensor *bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float7ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaTensor *weight,
    THCudaTensor *d_weight, THCudaTensor *d_bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
// ---------------------------------------------------------------------------
// float8 group: eighth instantiation of the dimension-templated GPU float
// API; signatures are identical to the float1 group except for the numeric
// suffix. `void **m` is an opaque metadata handle (cf. scn_*_freeMetadata).
// ---------------------------------------------------------------------------
// ActivePooling
void scn_gpu_float8ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer,
                                              _Bool average);
void scn_gpu_float8ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaIntTensor *rulesBuffer,
    _Bool average);
// Average Pooling
void scn_gpu_float8AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float8AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    long nFeaturesToDrop, THCudaIntTensor *rulesBuffer);
// Convolution (strided)
double scn_gpu_float8Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float8Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Deconvolution (same parameter list as Convolution)
double scn_gpu_float8Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float8Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Max Pooling
void scn_gpu_float8MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float8MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *output_features,
    THCudaTensor *d_output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
// SparseToDense
void scn_gpu_float8SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer);
void scn_gpu_float8SparseToDense_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaIntTensor *rulesBuffer);
// ValidConvolution (no outputSize/filterStride parameters)
double scn_gpu_float8ValidConvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *output_features,
    THCudaTensor *weight, THCudaTensor *bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float8ValidConvolution_backward(
    THLongTensor *inputSize, THLongTensor *filterSize, void **m,
    THCudaTensor *input_features, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaTensor *weight,
    THCudaTensor *d_weight, THCudaTensor *d_bias, long filterVolume,
    THCudaIntTensor *rulesBuffer);
// ---------------------------------------------------------------------------
// float9 group: ninth instantiation of the dimension-templated GPU float
// API; signatures are identical to the float1 group except for the numeric
// suffix. `void **m` is an opaque metadata handle (cf. scn_*_freeMetadata).
// ---------------------------------------------------------------------------
// ActivePooling
void scn_gpu_float9ActivePooling_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer,
                                              _Bool average);
void scn_gpu_float9ActivePooling_updateGradInput(
    THLongTensor *inputSize, void **m, THCudaTensor *d_input_features,
    THCudaTensor *d_output_features, THCudaIntTensor *rulesBuffer,
    _Bool average);
// Average Pooling
void scn_gpu_float9AveragePooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float9AveragePooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    long nFeaturesToDrop, THCudaIntTensor *rulesBuffer);
// Convolution (strided)
double scn_gpu_float9Convolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float9Convolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Deconvolution (same parameter list as Convolution)
double scn_gpu_float9Deconvolution_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float9Deconvolution_backward(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
    THLongTensor *filterStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *d_output_features,
    THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
    long filterVolume, THCudaIntTensor *rulesBuffer);
// Max Pooling
void scn_gpu_float9MaxPooling_updateOutput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
void scn_gpu_float9MaxPooling_updateGradInput(
    THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
    THLongTensor *poolStride, void **m, THCudaTensor *input_features,
    THCudaTensor *d_input_features, THCudaTensor *output_features,
    THCudaTensor *d_output_features, long nFeaturesToDrop,
    THCudaIntTensor *rulesBuffer);
// SparseToDense
void scn_gpu_float9SparseToDense_updateOutput(THLongTensor *inputSize, void **m,
                                              THCudaTensor *input_features,
                                              THCudaTensor *output_features,
                                              THCudaIntTensor *rulesBuffer);
void scn_gpu_float9SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THCudaTensor *input_features,
THCudaTensor *d_input_features, THCudaTensor *d_output_features,
THCudaIntTensor *rulesBuffer);
double scn_gpu_float9ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THCudaTensor *input_features, THCudaTensor *output_features,
THCudaTensor *weight, THCudaTensor *bias, long filterVolume,
THCudaIntTensor *rulesBuffer);
void scn_gpu_float9ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THCudaTensor *input_features, THCudaTensor *d_input_features,
THCudaTensor *d_output_features, THCudaTensor *weight,
THCudaTensor *d_weight, THCudaTensor *d_bias, long filterVolume,
THCudaIntTensor *rulesBuffer);
// ActivePooling
void scn_gpu_float10ActivePooling_updateOutput(
THLongTensor *inputSize, void **m, THCudaTensor *input_features,
THCudaTensor *output_features, THCudaIntTensor *rulesBuffer, _Bool average);
void scn_gpu_float10ActivePooling_updateGradInput(
THLongTensor *inputSize, void **m, THCudaTensor *d_input_features,
THCudaTensor *d_output_features, THCudaIntTensor *rulesBuffer,
_Bool average);
// Average Pooling
void scn_gpu_float10AveragePooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THCudaTensor *input_features,
THCudaTensor *output_features, long nFeaturesToDrop,
THCudaIntTensor *rulesBuffer);
void scn_gpu_float10AveragePooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THCudaTensor *input_features,
THCudaTensor *d_input_features, THCudaTensor *d_output_features,
long nFeaturesToDrop, THCudaIntTensor *rulesBuffer);
double scn_gpu_float10Convolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THCudaTensor *input_features,
THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float10Convolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THCudaTensor *input_features,
THCudaTensor *d_input_features, THCudaTensor *d_output_features,
THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
long filterVolume, THCudaIntTensor *rulesBuffer);
double scn_gpu_float10Deconvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THCudaTensor *input_features,
THCudaTensor *output_features, THCudaTensor *weight, THCudaTensor *bias,
long filterVolume, THCudaIntTensor *rulesBuffer);
void scn_gpu_float10Deconvolution_backward(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *filterSize,
THLongTensor *filterStride, void **m, THCudaTensor *input_features,
THCudaTensor *d_input_features, THCudaTensor *d_output_features,
THCudaTensor *weight, THCudaTensor *d_weight, THCudaTensor *d_bias,
long filterVolume, THCudaIntTensor *rulesBuffer);
// Max Pooling
void scn_gpu_float10MaxPooling_updateOutput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THCudaTensor *input_features,
THCudaTensor *output_features, long nFeaturesToDrop,
THCudaIntTensor *rulesBuffer);
void scn_gpu_float10MaxPooling_updateGradInput(
THLongTensor *inputSize, THLongTensor *outputSize, THLongTensor *poolSize,
THLongTensor *poolStride, void **m, THCudaTensor *input_features,
THCudaTensor *d_input_features, THCudaTensor *output_features,
THCudaTensor *d_output_features, long nFeaturesToDrop,
THCudaIntTensor *rulesBuffer);
// SparseToDense
void scn_gpu_float10SparseToDense_updateOutput(THLongTensor *inputSize,
void **m,
THCudaTensor *input_features,
THCudaTensor *output_features,
THCudaIntTensor *rulesBuffer);
void scn_gpu_float10SparseToDense_updateGradInput(
THLongTensor *inputSize, void **m, THCudaTensor *input_features,
THCudaTensor *d_input_features, THCudaTensor *d_output_features,
THCudaIntTensor *rulesBuffer);
double scn_gpu_float10ValidConvolution_updateOutput(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THCudaTensor *input_features, THCudaTensor *output_features,
THCudaTensor *weight, THCudaTensor *bias, long filterVolume,
THCudaIntTensor *rulesBuffer);
void scn_gpu_float10ValidConvolution_backward(
THLongTensor *inputSize, THLongTensor *filterSize, void **m,
THCudaTensor *input_features, THCudaTensor *d_input_features,
THCudaTensor *d_output_features, THCudaTensor *weight,
THCudaTensor *d_weight, THCudaTensor *d_bias, long filterVolume,
THCudaIntTensor *rulesBuffer);
// Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
// CPU entry-point definitions.  The TH "generic" pattern is used below: each
// generic/*.cpp source is written against placeholder types (real, Dimension)
// and the paired THGenerate*.h header re-includes it once per concrete
// type/dimension, stamping out the scn_cpu_<real><Dim>* functions declared
// in the generated header above.
#include <TH/TH.h>
#include <TH/THTensor.h>
// Name-mangling helpers expanded inside the generic sources:
//   scn_D_(X)  -> scn_<Dimension>_X          (dimension only)
//   scn_DR_(X) -> scn_cpu_<real><Dimension>X (type + dimension)
//   scn_R_(X)  -> scn_cpu_<real>_X           (type only)
#define scn_D_(NAME) TH_CONCAT_4(scn_, Dimension, _, NAME)
#define scn_DR_(NAME) TH_CONCAT_4(scn_cpu_, real, Dimension, NAME)
#define scn_R_(NAME) TH_CONCAT_4(scn_cpu_, real, _, NAME)
// Treat a NULL tensor argument as "absent" (used for optional bias tensors).
#define THOptionalTensorData(tensor) (tensor ? THTensor_(data)(tensor) : 0)
#include "generic/Geometry/Metadata.cpp"
#include "generic/Geometry/THGenerateDimTypes.h"
#include "generic/CPU/ActivePooling.cpp"
#include "generic/CPU/THGenerateDimFloatTypes.h"
#include "generic/CPU/AffineReluTrivialConvolution.cpp"
#include "generic/CPU/THGenerateFloatTypes.h"
#include "generic/CPU/AveragePooling.cpp"
#include "generic/CPU/THGenerateDimFloatTypes.h"
#include "generic/CPU/BatchwiseMultiplicativeDropout.cpp"
#include "generic/CPU/THGenerateFloatTypes.h"
#include "generic/CPU/BatchNormalization.cpp"
#include "generic/CPU/THGenerateFloatTypes.h"
#include "generic/CPU/Convolution.cpp"
#include "generic/CPU/THGenerateDimFloatTypes.h"
#include "generic/CPU/Deconvolution.cpp"
#include "generic/CPU/THGenerateDimFloatTypes.h"
#include "generic/CPU/LeakyReLU.cpp"
#include "generic/CPU/THGenerateFloatTypes.h"
#include "generic/CPU/MaxPooling.cpp"
#include "generic/CPU/THGenerateDimFloatTypes.h"
#include "generic/CPU/NetworkInNetwork.cpp"
#include "generic/CPU/THGenerateFloatTypes.h"
#include "generic/CPU/SparseToDense.cpp"
#include "generic/CPU/THGenerateDimFloatTypes.h"
// Non-generic C entry points used by the Python cffi layer to smuggle an
// opaque Metadata pointer through a `long`.
// NOTE(review): casting a pointer through `long` truncates on LLP64 targets
// (64-bit Windows), where long is 32 bits; intptr_t would be portable.
// Confirm the supported platforms before changing the FFI signature.
extern "C" long scn_readPtr(void **ptr) { return (long)(ptr[0]); }
extern "C" void scn_writePtr(long p, void **ptr) { ptr[0] = (void *)p; }
// Bit width of the rule-book integer type, exposed so Python can choose a
// matching tensor dtype.  uInt is presumably defined by the included
// generic sources -- not visible here.
extern "C" double scn_ruleBookBits() { return 8 * sizeof(uInt); }
#undef scn_D_
#undef scn_DR_
#undef scn_R_
#undef THOptionalTensorData
#include "drawCurve.cpp"
// Copyright 2016-present, Facebook, Inc.
// All rights reserved.
//
// This source code is licensed under the license found in the
// LICENSE file in the root directory of this source tree.
// GPU build: include the whole CPU translation unit first (so a single
// compilation of this file yields both CPU and GPU entry points), then stamp
// out the CUDA kernels with the same TH "generic" re-inclusion trick,
// producing the scn_gpu_<real><Dim>* functions declared in the header above.
#include "init.cpp"
#include <THC/THC.h>
// #include <THC/THCTensor.h>
// #include <THC/THCNumerics.cuh>
// #include <THC/THCAtomics.cuh>
// Global CUDA state owned by the host (PyTorch) process.
extern THCState *state;
// GPU name-mangling helpers (cf. the scn_cpu_* counterparts in init.cpp):
//   scn_R_(X)  -> scn_gpu_<real>_X
//   scn_DR_(X) -> scn_gpu_<real><Dimension>X
#define scn_R_(NAME) TH_CONCAT_4(scn_gpu_, real, _, NAME)
#define scn_DR_(NAME) TH_CONCAT_4(scn_gpu_, real, Dimension, NAME)
#include "generic/GPU/ActivePooling.cu"
#include "generic/GPU/THGenerateDimCudaFloatTypes.h"
#include "generic/GPU/AffineReluTrivialConvolution.cu"
#include "generic/GPU/THGenerateCudaFloatTypes.h"
#include "generic/GPU/AveragePooling.cu"
#include "generic/GPU/THGenerateDimCudaFloatTypes.h"
#include "generic/GPU/BatchwiseMultiplicativeDropout.cu"
#include "generic/GPU/THGenerateCudaFloatTypes.h"
#include "generic/GPU/BatchNormalization.cu"
#include "generic/GPU/THGenerateCudaFloatTypes.h"
#include "generic/GPU/Convolution.cu"
#include "generic/GPU/THGenerateDimCudaFloatTypes.h"
#include "generic/GPU/Deconvolution.cu"
#include "generic/GPU/THGenerateDimCudaFloatTypes.h"
#include "generic/GPU/LeakyReLU.cu"
#include "generic/GPU/THGenerateCudaFloatTypes.h"
#include "generic/GPU/MaxPooling.cu"
#include "generic/GPU/THGenerateDimCudaFloatTypes.h"
#include "generic/GPU/NetworkInNetwork.cu"
#include "generic/GPU/THGenerateCudaFloatTypes.h"
#include "generic/GPU/SparseToDense.cu"
#include "generic/GPU/THGenerateDimCudaFloatTypes.h"
#undef scn_R_
#undef scn_DR_
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Global profiling counters.  Layers add their multiply-add and hidden-state
# counts to these during forward passes; callers reset them to zero before a
# measurement pass (see ClassificationTrainValidate).
forward_pass_multiplyAdd_count = 0
forward_pass_hidden_states = 0
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from ..utils import *
from .metadata import Metadata
from .inputBatch import InputBatch
from .sparseConvNetTensor import SparseConvNetTensor
from .sparseModule import SparseModule
from .averagePooling import AveragePooling
from .batchNormalization import BatchNormalization
from .concatTable import ConcatTable
from .convolution import Convolution
from .cAddTable import CAddTable
from .deconvolution import Deconvolution
from .identity import Identity
from .joinTable import JoinTable
from .leakyReLU import LeakyReLU
from .maxPooling import MaxPooling
from .networkInNetwork import NetworkInNetwork
from .reLU import ReLU
from .sequential import Sequential
from .sparseToDense import SparseToDense
from .validConvolution import ValidConvolution
from .networkArchitectures import *
from .classificationTrainValidate import ClassificationTrainValidate
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Affine transformation (i.e. the second half of a typical batchnormalization layer)
Parameters:
nPlanes : number of input planes
noise : add multiplicative and additive noise during training if >0.
leakiness : Apply activation function inplace: 0<=leakiness<=1.
0 for ReLU, values in (0,1) for LeakyReLU, 1 for no activation function.
"""
import torch
import sparseconvnet as s
from . import SparseModule
from ..utils import toLongTensor, typed_fn, optionalTensor, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
import math
class AffineReLUTrivialConvolution(SparseModule):
    """Fused per-plane affine transform + ReLU + 1x1 ("trivial") convolution.

    The affine part (per-plane scale and shift) is typically the second half
    of a batch-normalization layer; fusing it with the following 1x1
    convolution avoids materializing the intermediate feature matrix.

    Parameters:
      nIn  : number of input planes
      nOut : number of output planes
      additiveGrad : passed through to the backward kernel; controls whether
        gradients accumulate into or overwrite the gradient buffers
    """

    def __init__(self, nIn, nOut, additiveGrad=False):
        SparseModule.__init__(self)
        self.nIn = nIn
        self.nOut = nOut
        self.affineWeight = torch.Tensor(nIn).fill_(1)
        self.affineBias = torch.Tensor(nIn).zero_()
        # He-style initialization: std = sqrt(2 / fan_in).
        self.convWeight = torch.Tensor(
            nIn, nOut).normal_(
            0, math.sqrt(
                2.0 / nIn))
        self.gradAffineWeight = torch.Tensor(nIn).fill_(0)
        self.gradAffineBias = torch.Tensor(nIn).zero_()
        self.gradConvWeight = torch.Tensor(nIn, nOut).zero_()
        self.additiveGrad = additiveGrad
        self.output = SparseConvNetTensor(torch.Tensor())
        self.gradInput = torch.Tensor()

    def parameters(self):
        # (params, gradParams) in matching order, as torch.legacy expects.
        return [self.affineWeight, self.affineBias, self.convWeight], [
            self.gradAffineWeight, self.gradAffineBias, self.gradConvWeight]

    def updateOutput(self, input):
        self.output.metadata = input.metadata
        self.output.spatial_size = input.spatial_size
        typed_fn(input, 'AffineReluTrivialConvolution_updateOutput')(
            input.features,
            self.output.features,
            self.affineWeight,
            self.affineBias,
            self.convWeight)
        # Book-keeping for the package-level profiling counters.
        s.forward_pass_multiplyAdd_count += input.features.size(
            0) * self.nIn * self.nOut
        s.forward_pass_hidden_states += self.output.features.nelement()
        return self.output

    def backward(self, input, gradOutput, scale=1):
        assert scale == 1
        typed_fn(input, 'AffineReluTrivialConvolution_backward')(
            input.features,
            self.gradInput,
            gradOutput,
            self.affineWeight,
            self.gradAffineWeight,
            self.affineBias,
            self.gradAffineBias,
            self.convWeight,
            self.gradConvWeight,
            self.additiveGrad)
        return self.gradInput

    def updateGradInput(self, input, gradOutput):
        # Bug fix: was `assert false`, which raised NameError rather than
        # the intended AssertionError.
        assert False, 'use backward() instead'

    def accGradParameters(self, input, gradOutput, scale):
        # Bug fix: `self` was missing from the signature and the assert used
        # the undefined name `false`.
        assert False, 'use backward() instead'

    def __repr__(self):
        s = 'AffineReluTrivialConvolution ' + \
            str(self.nIn) + '->' + str(self.nOut)
        return s

    def type(self, t=None, tensorCache=None):
        if t is None:
            return self._type
        self._type = t
        self.affineWeight = self.affineWeight.type(t)
        self.affineBias = self.affineBias.type(t)
        self.convWeight = self.convWeight.type(t)
        self.gradAffineWeight = self.gradAffineWeight.type(t)
        self.gradAffineBias = self.gradAffineBias.type(t)
        self.gradConvWeight = self.gradConvWeight.type(t)
        self.gradInput = self.gradInput.type(t)
        self.output.type(t)
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sparseconvnet
from . import SparseModule
from ..utils import toLongTensor, dim_typed_fn, optionalTensor, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
class AveragePooling(SparseModule):
    """Sparse average pooling.

    Parameters:
      dimension : number of spatial dimensions
      pool_size, pool_stride : int or per-dimension sequence
      nFeaturesToDrop : leading input planes to skip (default 0)
    """

    def __init__(self, dimension, pool_size, pool_stride, nFeaturesToDrop=0):
        SparseModule.__init__(self)
        self.dimension = dimension
        self.pool_size = toLongTensor(dimension, pool_size)
        self.pool_stride = toLongTensor(dimension, pool_stride)
        self.pool_volume = self.pool_size.prod()
        self.nFeaturesToDrop = nFeaturesToDrop
        self.output = SparseConvNetTensor(torch.Tensor())
        self.gradInput = torch.Tensor()

    def updateOutput(self, input):
        self.output.metadata = input.metadata
        # out = (in - size) / stride + 1 on LongTensors.
        self.output.spatial_size =\
            (input.spatial_size - self.pool_size) / self.pool_stride + 1
        dim_typed_fn(self.dimension, input, 'AveragePooling_updateOutput')(
            input.spatial_size,
            self.output.spatial_size,
            self.pool_size,
            self.pool_stride,
            input.metadata.ffi,
            input.features,
            self.output.features,
            self.nFeaturesToDrop,
            torch.cuda.IntTensor() if input.features.is_cuda else nullptr)
        return self.output

    def updateGradInput(self, input, gradOutput):
        dim_typed_fn(
            self.dimension, input, 'AveragePooling_updateGradInput')(
            input.spatial_size,
            self.output.spatial_size,
            self.pool_size,
            self.pool_stride,
            input.metadata.ffi,
            input.features,
            self.gradInput,
            gradOutput,
            self.nFeaturesToDrop,
            torch.cuda.IntTensor() if input.features.is_cuda else nullptr)
        return self.gradInput

    def type(self, t=None, tensorCache=None):
        if t is None:
            return self._type
        # Bug fix: _type was never recorded, so a later type() query returned
        # a stale value (cf. Convolution.type, which does record it).
        self._type = t
        self.output.type(t)
        self.gradInput = self.gradInput.type(t)

    def __repr__(self):
        s = 'AveragePooling'
        if self.pool_size.max() == self.pool_size.min() and\
                self.pool_stride.max() == self.pool_stride.min():
            s = s + str(self.pool_size[0]) + '/' + str(self.pool_stride[0])
        else:
            s = s + '(' + str(self.pool_size[0])
            for i in self.pool_size[1:]:
                s = s + ',' + str(i)
            s = s + ')/(' + str(self.pool_stride[0])
            for i in self.pool_stride[1:]:
                s = s + ',' + str(i)
            s = s + ')'
        if self.nFeaturesToDrop > 0:
            # Bug fix: str() was missing, so str + int raised TypeError.
            s = s + ' nFeaturesToDrop = ' + str(self.nFeaturesToDrop)
        return s

    def suggestInputSize(self, out_size):
        return (out_size - 1) * self.pool_stride + self.pool_size
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Parameters:
nPlanes : number of input planes
eps : small number used to stabilise standard deviation calculation
momentum : for calculating running average for testing (default 0.9)
affine : only 'true' is supported at present (default 'true')
noise : add multiplicative and additive noise during training if >0.
leakiness : Apply activation def inplace: 0<=leakiness<=1.
0 for ReLU, values in (0,1) for LeakyReLU, 1 for no activation def.
"""
import torch
import sparseconvnet
from . import SparseModule
from ..utils import toLongTensor, typed_fn, optionalTensor, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
class BatchNormalization(SparseModule):
    """Batch normalization with an optional fused (leaky) ReLU.

    Parameters:
      nPlanes  : number of input planes (must be a multiple of 4)
      eps      : small number used to stabilise the std-dev calculation
      momentum : running-average factor for test-time statistics (default 0.9)
      affine   : only True is supported at present
      leakiness: fused activation; 0 = ReLU, (0,1) = LeakyReLU, 1 = none
    """

    def __init__(
            self,
            nPlanes,
            eps=1e-4,
            momentum=0.9,
            affine=True,
            leakiness=1):
        SparseModule.__init__(self)
        # NOTE(review): the divisibility requirement presumably comes from the
        # C/CUDA kernels processing planes in groups of four -- confirm there.
        assert nPlanes % 4 == 0
        self.nPlanes = nPlanes
        self.eps = eps
        self.momentum = momentum
        self.affine = affine
        self.leakiness = leakiness
        self.saveMean = torch.Tensor(nPlanes).fill_(0)
        self.saveInvStd = torch.Tensor(nPlanes).fill_(1)
        self.runningMean = torch.Tensor(nPlanes).fill_(0)
        self.runningVar = torch.Tensor(nPlanes).fill_(1)
        if affine:
            self.weight = torch.Tensor(nPlanes).fill_(1)
            self.bias = torch.Tensor(nPlanes).fill_(0)
            self.gradWeight = torch.Tensor(nPlanes)
            self.gradBias = torch.Tensor(nPlanes)
        self.output = SparseConvNetTensor(torch.Tensor())
        self.gradInput = torch.Tensor()

    def updateOutput(self, input):
        assert input.features.size(1) == self.nPlanes
        self.output.metadata = input.metadata
        self.output.spatial_size = input.spatial_size
        typed_fn(input, 'BatchNormalization_updateOutput')(
            input.features,
            self.output.features,
            self.saveMean,
            self.saveInvStd,
            self.runningMean,
            self.runningVar,
            optionalTensor(self, 'weight'),
            optionalTensor(self, 'bias'),
            self.eps,
            self.momentum,
            self.train,
            self.leakiness)
        return self.output

    def backward(self, input, gradOutput, scale=1):
        assert scale == 1
        assert self.train
        typed_fn(input, 'BatchNormalization_backward')(
            input.features,
            self.gradInput,
            self.output.features,
            gradOutput,
            self.saveMean,
            self.saveInvStd,
            self.runningMean,
            self.runningVar,
            optionalTensor(self, 'weight'),
            optionalTensor(self, 'bias'),
            optionalTensor(self, 'gradWeight'),
            optionalTensor(self, 'gradBias'),
            self.leakiness)
        return self.gradInput

    def updateGradInput(self, input, gradOutput):
        # Bug fix: was `assert false`, which raised NameError rather than
        # the intended AssertionError.
        assert False, 'use backward() instead'

    def accGradParameters(self, input, gradOutput, scale):
        # Bug fix: was `assert false` (NameError).
        assert False, 'use backward() instead'

    def type(self, t=None, tensorCache=None):
        self.output.type(t)
        SparseModule.type(self, t, tensorCache)

    def __repr__(self):
        s = 'BatchNorm(' + str(self.nPlanes) + ',eps=' + str(self.eps) + \
            ',momentum=' + str(self.momentum) + ',affine=' + str(self.affine)
        if self.leakiness > 0:
            s = s + ',leakiness=' + str(self.leakiness)
        s = s + ')'
        return s
class BatchNormReLU(BatchNormalization):
    # Convenience subclass: affine batch normalization followed by a fused
    # ReLU (leakiness = 0).
    def __init__(self, nPlanes, eps=1e-4, momentum=0.9):
        BatchNormalization.__init__(self, nPlanes, eps, momentum, True, 0)

    def __repr__(self):
        return 'BatchNormReLU({},eps={},momentum={},affine={})'.format(
            self.nPlanes, self.eps, self.momentum, self.affine)
class BatchNormalizationInTensor(BatchNormalization):
    # Batch normalization that writes its result into a column slice
    # [output_column_offset, output_column_offset + nPlanes) of a larger
    # output feature tensor (non-affine, no fused activation).
    # NOTE(review): self.output.features appears to be sized/shared by the
    # surrounding network before updateOutput runs -- confirm with callers.
    def __init__(
            self,
            nPlanes,
            eps=1e-4,
            momentum=0.9,
            output_column_offset=0):
        BatchNormalization.__init__(self, nPlanes, eps, momentum, False, 1)
        self.output_column_offset = output_column_offset
    def updateOutput(self, input):
        # View of this module's destination columns in the shared tensor.
        o = self.output.features.narrow(
            1, self.output_column_offset, self.nPlanes)
        self.output.metadata = input.metadata
        self.output.spatial_size = input.spatial_size
        typed_fn(input, 'BatchNormalizationInTensor_updateOutput')(
            input.features,
            o,
            self.saveMean,
            self.saveInvStd,
            self.runningMean,
            self.runningVar,
            optionalTensor(self, 'weight'),
            optionalTensor(self, 'bias'),
            self.eps,
            self.momentum,
            self.train,
            self.leakiness)
        return self.output
    def backward(self, input, gradOutput, scale=1):
        assert scale == 1
        assert self.train
        # Narrow both the saved output and the incoming gradient to this
        # module's columns before calling the shared backward kernel.
        o = self.output.features.narrow(
            1, self.output_column_offset, self.nPlanes)
        d_o = gradOutput.narrow(1, self.output_column_offset, self.nPlanes)
        typed_fn(input, 'BatchNormalization_backward')(
            input.features,
            self.gradInput,
            o,
            d_o,
            self.saveMean,
            self.saveInvStd,
            self.runningMean,
            self.runningVar,
            optionalTensor(self, 'weight'),
            optionalTensor(self, 'bias'),
            optionalTensor(self, 'gradWeight'),
            optionalTensor(self, 'gradBias'),
            self.leakiness)
        return self.gradInput
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Assume all the inputs have identical SparseGrids and input[i].nActive
Assume input[0].nPlanes >= input[i].nPlanes for all i=1,#input
output.validRules is taken from input[0].validRules (could do set union?)
(for resnets, make sure the residual link is input[1])
"""
import torch
import sparseconvnet
from . import SparseModule
from ..utils import toLongTensor, dim_typed_fn, optionalTensor, nullptr, set
from .sparseConvNetTensor import SparseConvNetTensor
class CAddTable(SparseModule):
    """Element-wise addition of a list of sparse tensors.

    All inputs are assumed to share identical SparseGrids / nActive, and
    input[0] must have the widest feature dimension (narrower inputs are
    added into its leading columns).  With ip=True the sum is accumulated
    in-place into input[0].
    """

    def __init__(self, ip=False):
        SparseModule.__init__(self)
        self.inplace = ip
        self.output = None if ip else SparseConvNetTensor(torch.Tensor())

    def updateOutput(self, input):
        head = input[0]
        if self.inplace:
            self.output = head
        else:
            self.output.features.resize_as_(head.features).copy_(head.features)
            self.output.metadata = head.metadata
            self.output.spatial_size = head.spatial_size
        for other in input[1:]:
            width = other.features.size(1)
            self.output.features.narrow(1, 0, width).add_(other.features)
        return self.output

    def updateGradInput(self, input, gradOutput):
        self.gradInput = []
        full_width = input[0].features.size(1)
        for item in input:
            width = item.features.size(1)
            if self.inplace and width == full_width:
                # Full-width in-place branch can share the gradient tensor.
                self.gradInput.append(gradOutput)
            else:
                self.gradInput.append(gradOutput.narrow(1, 0, width).clone())
        return self.gradInput

    def type(self, t, tensorCache=None):
        if t and not self.inplace:
            self.output.type(t)

    def clearState(self):
        if self.inplace:
            self.output = None
        else:
            set(self.output)
        self.gradInput = None
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch.legacy import nn, optim
import sparseconvnet as s
import time
import os
import torch
import math
def updateStats(stats, output, target, loss):
    # Accumulate running classification statistics for one mini-batch into
    # `stats` (keys: 'n' = samples seen, 'nll' = summed loss,
    # 'top1' / 'top5' = correct-prediction counts).
    batchSize = output.size(0)
    stats['n'] += batchSize
    stats['nll'] += loss * batchSize
    # Class indices ranked best-first for each sample.
    _, ranked = output.float().sort(1, True)
    hits = ranked.eq(target.long().view(batchSize, 1).expand_as(output))
    # Top-1 score: first column only.
    stats['top1'] += hits.narrow(1, 0, 1).sum()
    # Top-5 score (fewer if there are under five classes).
    stats['top5'] += hits.narrow(1, 0, min(5, hits.size(1))).sum()
def ClassificationTrainValidate(model, dataset, p):
    # Train `model` on dataset['train'] and evaluate on dataset['val'] for
    # p['nEpochs'] epochs using torch.legacy SGD with Nesterov momentum and
    # an exponentially decaying learning rate.  Missing keys of `p` receive
    # the defaults below.  When p['checkPoint'] is set, model.pth/epoch.pth
    # are written each epoch and training resumes from them if present.
    t = model.type()
    if 'nEpochs' not in p:
        p['nEpochs'] = 100
    if 'initial_LR' not in p:
        p['initial_LR'] = 1e-1
    if 'LR_decay' not in p:
        p['LR_decay'] = 4e-2
    if 'weightDecay' not in p:
        p['weightDecay'] = 1e-4
    if 'momentum' not in p:
        p['momentum'] = 0.9
    if 'checkPoint' not in p:
        p['checkPoint'] = False
    optimState = {
        'learningRate': p['initial_LR'],
        'learningRateDecay': 0.0,
        'momentum': p['momentum'],
        'nesterov': True,
        'dampening': 0.0,
        'weightDecay': p['weightDecay'],
        'epoch': 1
    }
    # Resume from a checkpoint in the working directory, if one exists.
    if os.path.isfile('epoch.pth'):
        optimState['epoch'] = torch.load('epoch.pth') + 1
        print('Restarting at epoch ' +
              str(optimState['epoch']) +
              ' from model.pickle ..')
        model = torch.load('model.pth')
    print(p)
    criterion = nn.CrossEntropyCriterion()
    criterion.type(model.type())
    params, gradParams = model.flattenParameters()
    print('#parameters', params.nelement())
    for epoch in range(optimState['epoch'], p['nEpochs'] + 1):
        model.training()
        stats = {'top1': 0, 'top5': 0, 'n': 0, 'nll': 0}
        # LR schedule: initial_LR * exp(-(epoch - 1) * LR_decay).
        optimState['learningRate'] = p['initial_LR'] * \
            math.exp((1 - epoch) * p['LR_decay'])
        start = time.time()
        for batch in dataset['train']():
            batch['input'].type(t)
            batch['target'] = batch['target'].type(t)
            model.forward(batch['input'])
            criterion.forward(model.output, batch['target'])
            updateStats(stats, model.output, batch['target'], criterion.output)
            gradParams.zero_()  # model:zeroGradParameters()
            criterion.backward(model.output, batch['target'])
            model.backward(batch['input'], criterion.gradInput)
            # torch.legacy optim wants a closure returning (loss, grads);
            # the forward/backward passes were already run above.
            def feval(x):
                return criterion.output, gradParams
            optim.sgd(feval, params, optimState)
        # Error rates are printed as 100 * (1 - accuracy).
        print(epoch, 'train: top1=%.2f%% top5=%.2f%% nll:%.2f time:%.1fs' %
              (100 *
               (1 -
                1.0 * stats['top1'] /
                stats['n']), 100 *
               (1 -
                1.0 * stats['top5'] /
                stats['n']), stats['nll'] /
               stats['n'], time.time() -
               start))
        if p['checkPoint']:
            model.modules[0].clearState()
            torch.save(model, 'model.pth')
            torch.save(epoch, 'epoch.pth')
        model.evaluate()
        # Reset profiling counters so the validation pass measures itself.
        s.forward_pass_multiplyAdd_count = 0
        s.forward_pass_hidden_states = 0
        stats = {'top1': 0, 'top5': 0, 'n': 0, 'nll': 0}
        start = time.time()
        for batch in dataset['val']():
            batch['input'].type(t)
            batch['target'] = batch['target'].type(t)
            model.forward(batch['input'])
            criterion.forward(model.output, batch['target'])
            updateStats(stats, model.output, batch['target'], criterion.output)
        print(epoch, 'test: top1=%.2f%% top5=%.2f%% nll:%.2f time:%.1fs' %
              (100 *
               (1 -
                1.0 * stats['top1'] /
                stats['n']), 100 *
               (1 -
                1.0 * stats['top5'] /
                stats['n']), stats['nll'] /
               stats['n'], time.time() -
               start))
        print(
            '%.3e MultiplyAdds/sample %.3e HiddenStates/sample' %
            (s.forward_pass_multiplyAdd_count /
             stats['n'],
             s.forward_pass_hidden_states /
             stats['n']))
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sparseconvnet
from torch.legacy.nn import ConcatTable as C
from ..utils import toLongTensor, dim_typed_fn, optionalTensor, nullptr, set
from .sparseConvNetTensor import SparseConvNetTensor
class ConcatTable(C):
    """Apply every child module to the same sparse input and return the list
    of their outputs; backward() sums the per-branch input gradients."""

    def __init__(self):
        C.__init__(self)
        self.gradInput = torch.Tensor()

    def updateOutput(self, input):
        self.output = [module.forward(input) for module in self.modules]
        return self.output

    def backward(self, input, gradOutput, scale=1):
        # Accumulate each branch's gradient w.r.t. the shared input.
        self.gradInput.resize_as_(input.features).zero_()
        for module, branch_grad in zip(self.modules, gradOutput):
            self.gradInput.add_(module.backward(input, branch_grad, scale))
        return self.gradInput

    def clearState(self):
        self.output = None
        set(self.gradInput)
        for module in self.modules:
            module.clearState()

    def suggestInputSize(self, nOut):
        # All branches must agree on sizing; delegate to the first.
        return self.modules[0].suggestInputSize(nOut)
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from . import SparseModule
import sparseconvnet as s
from ..utils import toLongTensor, dim_typed_fn, optionalTensor, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
class Convolution(SparseModule):
    # Sparse strided convolution.
    # Parameters:
    #   dimension : number of spatial dimensions
    #   nIn, nOut : input / output planes
    #   filter_size, filter_stride : int or per-dimension sequence
    #   bias : add a learnable per-plane bias if True
    def __init__(self, dimension, nIn, nOut, filter_size, filter_stride, bias):
        SparseModule.__init__(self)
        self.dimension = dimension
        self.nIn = nIn
        self.nOut = nOut
        self.filter_size = toLongTensor(dimension, filter_size)
        self.filter_volume = self.filter_size.prod()
        self.filter_stride = toLongTensor(dimension, filter_stride)
        # He-style init: std = sqrt(2 / fan_in), fan_in = nIn * filter_volume.
        self.weight = torch.Tensor(
            nIn * self.filter_volume, nOut
        ).normal_(0, (2.0 / nIn / self.filter_volume)**0.5)
        self.gradWeight = torch.Tensor(nIn * self.filter_volume, nOut)
        if bias:
            self.bias = torch.Tensor(nOut).zero_()
            self.gradBias = torch.Tensor(nOut)
        self.output = SparseConvNetTensor(torch.Tensor())
        self.gradInput = torch.Tensor()
    def updateOutput(self, input):
        assert input.features.size(1) == self.nIn
        self.output.metadata = input.metadata
        # out = (in - size) / stride + 1 on LongTensors.
        self.output.spatial_size =\
            (input.spatial_size - self.filter_size) / self.filter_stride + 1
        # The kernel returns its multiply-add count, fed to the profiling
        # counters in the sparseconvnet package.
        s.forward_pass_multiplyAdd_count +=\
            dim_typed_fn(
                self.dimension, input, 'Convolution_updateOutput')(
                input.spatial_size,
                self.output.spatial_size,
                self.filter_size,
                self.filter_stride,
                input.metadata.ffi,
                input.features,
                self.output.features,
                self.weight,
                optionalTensor(self, 'bias'),
                self.filter_volume,
                torch.cuda.IntTensor() if input.features.is_cuda else nullptr)
        s.forward_pass_hidden_states += self.output.features.nelement()
        return self.output
    def backward(self, input, gradOutput, scale=1):
        assert scale == 1
        dim_typed_fn(
            self.dimension, input, 'Convolution_backward')(
            input.spatial_size,
            self.output.spatial_size,
            self.filter_size,
            self.filter_stride,
            input.metadata.ffi,
            input.features,
            self.gradInput,
            gradOutput,
            self.weight,
            self.gradWeight,
            optionalTensor(self, 'gradBias'),
            self.filter_volume,
            torch.cuda.IntTensor() if input.features.is_cuda else nullptr)
        return self.gradInput
    def type(self, t=None, tensorCache=None):
        # With no argument, report the current type; otherwise convert all
        # tensors owned by this module.
        if t is None:
            return self._type
        self._type = t
        self.weight = self.weight.type(t)
        self.gradWeight = self.gradWeight.type(t)
        self.output.type(t)
        self.gradInput = self.gradInput.type(t)
        if hasattr(self, 'bias'):
            self.bias = self.bias.type(t)
            self.gradBias = self.gradBias.type(t)
    def __repr__(self):
        s = 'Convolution ' + str(self.nIn) + '->' + str(self.nOut) + ' C'
        if self.filter_size.max() == self.filter_size.min() and\
                self.filter_stride.max() == self.filter_stride.min():
            s = s + str(self.filter_size[0]) + '/' + str(self.filter_stride[0])
        else:
            s = s + '(' + str(self.filter_size[0])
            for i in self.filter_size[1:]:
                s = s + ',' + str(i)
            s = s + ')/(' + str(self.filter_stride[0])
            for i in self.filter_stride[1:]:
                s = s + ',' + str(i)
            s = s + ')'
        return s
    def suggestInputSize(self, out_size):
        # Inverse of the updateOutput size rule.
        return (out_size - 1) * self.filter_stride + self.filter_size
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.legacy.nn import Module
import sparseconvnet as s
from . import SparseModule
from ..utils import toLongTensor, dim_typed_fn, optionalTensor, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
class Deconvolution(SparseModule):
    """Sparse deconvolution (transposed convolution); size-wise the inverse
    of Convolution.

    Parameters:
      dimension : number of spatial dimensions
      nIn, nOut : input / output planes
      filter_size, filter_stride : int or per-dimension sequence
      bias : add a learnable per-plane bias if True
    """

    def __init__(self, dimension, nIn, nOut,
                 filter_size, filter_stride, bias):
        SparseModule.__init__(self)
        self.dimension = dimension
        self.nIn = nIn
        self.nOut = nOut
        self.filter_size = toLongTensor(dimension, filter_size)
        self.filter_stride = toLongTensor(dimension, filter_stride)
        self.filter_volume = self.filter_size.prod()
        # He-style init: std = sqrt(2 / fan_in), fan_in = nIn * filter_volume.
        self.weight = torch.Tensor(
            nIn * self.filter_volume, nOut
        ).normal_(0, (2.0 / nIn / self.filter_volume)**0.5)
        self.gradWeight = torch.Tensor(nIn * self.filter_volume, nOut)
        if bias:
            self.bias = torch.Tensor(nOut).zero_()
            self.gradBias = torch.Tensor(nOut)
        self.output = SparseConvNetTensor(torch.Tensor())
        self.gradInput = torch.Tensor()

    def updateOutput(self, input):
        assert input.features.size(1) == self.nIn
        self.output.metadata = input.metadata
        # Inverse of Convolution's rule: out = (in - 1) * stride + size.
        self.output.spatial_size =\
            (input.spatial_size - 1) * self.filter_stride + self.filter_size
        # The kernel returns its multiply-add count for the profiling counters.
        s.forward_pass_multiplyAdd_count +=\
            dim_typed_fn(
                self.dimension, input, 'Deconvolution_updateOutput')(
                input.spatial_size,
                self.output.spatial_size,
                self.filter_size,
                self.filter_stride,
                input.metadata.ffi,
                input.features,
                self.output.features,
                self.weight,
                optionalTensor(self, 'bias'),
                self.filter_volume,
                torch.cuda.IntTensor() if input.features.is_cuda else nullptr)
        s.forward_pass_hidden_states += self.output.features.nelement()
        return self.output

    def backward(self, input, gradOutput, scale=1):
        assert scale == 1
        dim_typed_fn(
            self.dimension, input, 'Deconvolution_backward')(
            input.spatial_size,
            self.output.spatial_size,
            self.filter_size,
            self.filter_stride,
            input.metadata.ffi,
            input.features,
            self.gradInput,
            gradOutput,
            self.weight,
            self.gradWeight,
            optionalTensor(self, 'gradBias'),
            self.filter_volume,
            torch.cuda.IntTensor() if input.features.is_cuda else nullptr)
        return self.gradInput

    def type(self, t=None, tensorCache=None):
        if t is None:
            return self._type
        self._type = t
        self.weight = self.weight.type(t)
        self.gradWeight = self.gradWeight.type(t)
        self.output.type(t)
        self.gradInput = self.gradInput.type(t)
        if hasattr(self, 'bias'):
            self.bias = self.bias.type(t)
            self.gradBias = self.gradBias.type(t)

    def __repr__(self):
        s = 'Deconvolution ' + str(self.nIn) + '->' + str(self.nOut) + ' C'
        if self.filter_size.max() == self.filter_size.min() and\
                self.filter_stride.max() == self.filter_stride.min():
            s = s + str(self.filter_size[0]) + '/' + str(self.filter_stride[0])
        else:
            s = s + '(' + str(self.filter_size[0])
            for i in self.filter_size[1:]:
                s = s + ',' + str(i)
            s = s + ')/(' + str(self.filter_stride[0])
            for i in self.filter_stride[1:]:
                s = s + ',' + str(i)
            s = s + ')'
        return s

    def suggestInputSize(self, out_size):
        # Bug fix: the `return` keyword was missing, so this method always
        # returned None (cf. Convolution.suggestInputSize, which returns).
        return (out_size - self.filter_size) / self.filter_stride + 1
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sparseconvnet as s
from torch.legacy.nn import Container
from ..utils import toLongTensor, typed_fn, optionalTensor, nullptr, set
from .sparseConvNetTensor import SparseConvNetTensor
from .batchNormalization import *
from .affineReLUTrivialConvolution import AffineReLUTrivialConvolution
from .validConvolution import ValidConvolution
import math
class DenseNetBlock(Container):
    """DenseNet-style block for sparse tensors.

    All layers write their outputs into disjoint column ranges of one
    shared feature tensor, so the block grows the plane count from
    nInputPlanes to nInputPlanes + nExtraLayers * growthRate.

    Each extra layer i is a chain of four modules (indices 4*i+1..4*i+4):
      AffineReLUTrivialConvolution (bottleneck to 4*growthRate planes)
      -> BatchNormalization
      -> ValidConvolution (3^d filter, growthRate output planes)
      -> BatchNormalizationInTensor writing into the shared output at
         column offset nInputPlanes + i * growthRate.
    """

    def __init__(self, dimension, nInputPlanes, nExtraLayers=2, growthRate=16):
        Container.__init__(self)
        self.dimension = dimension
        self.nInputPlanes = nInputPlanes
        self.nExtraLayers = nExtraLayers
        self.growthRate = growthRate
        assert(self.nExtraLayers >= 1)
        self.nOutputPlanes = nInputPlanes + nExtraLayers * growthRate
        self.output = SparseConvNetTensor(torch.Tensor())
        # Module 0: batchnorm the input into the first columns of output.
        self.add(
            BatchNormalizationInTensor(
                nInputPlanes,
                output_column_offset=0))
        self.modules[0].output = self.output
        self.gradInput = self.modules[0].gradInput
        for i in range(nExtraLayers):
            # Layer i sees the input planes plus everything grown so far.
            nFeatures = self.nInputPlanes + i * growthRate
            nFeaturesB = 4 * growthRate
            # Module 4*i+1
            self.add(AffineReLUTrivialConvolution(nFeatures, nFeaturesB, True))
            # Module 4*i+2
            self.add(BatchNormalization(nFeaturesB))
            # Module 4*i+3
            self.add(
                ValidConvolution(
                    dimension,
                    nFeaturesB,
                    growthRate,
                    3,
                    False))
            # Module 4*i+4
            self.add(
                BatchNormalizationInTensor(
                    growthRate,
                    output_column_offset=self.nInputPlanes +
                    i *
                    growthRate))
            # The final module of each chain writes into the shared output.
            self.modules[4 * i + 4].output = self.output

    def updateOutput(self, input):
        """Forward pass: run every module in order; all of them append
        into the shared self.output tensor."""
        assert input.features.size(1) == self.nInputPlanes
        self.output.spatial_size = input.spatial_size
        self.output.metadata = input.metadata
        self.output.features.resize_(
            input.features.size(0), self.nOutputPlanes)
        i = input
        for m in self.modules:
            i = m.updateOutput(i)
        return self.output

    def backward(self, input, gradOutput, scale=1):
        """Backward pass in reverse module order.

        Each chain's first module reads its gradient slice straight from
        gradOutput (dense connectivity), so those gradInputs are aliased
        to gradOutput before the reverse sweep.
        """
        assert scale == 1
        g = gradOutput
        for i in range(self.nExtraLayers):
            self.modules[4 * i + 1].gradInput = gradOutput
        # Walk modules in reverse, feeding each the previous module's output.
        for m, m_ in zip(self.modules[:0:-1],
                         self.modules[len(self.modules) - 2::-1]):
            g = m.backward(m_.output, g)
        self.modules[0].backward(input, g)
        return self.gradInput

    def type(self, type, tensorCache=None):
        self._type = type
        self.output.features = self.output.features.type(type)
        for x in self.modules:
            x.type(type)
        self.gradInput = self.modules[0].gradInput

    def __repr__(self):
        s = 'DenseNetBlock(' + str(self.nInputPlanes) + '->' + str(self.nInputPlanes) + '+' + str(
            self.nExtraLayers) + '*' + str(self.growthRate) + '=' + str(self.nOutputPlanes) + ')'
        return s

    def clearState(self):
        # BUG FIX: `ipairs` is a Lua builtin left over from the Torch7
        # port and is undefined in Python; iterate the list directly.
        for m in self.modules:
            m.clearState()
        set(self.output)
        set(self.gradInput)

    def suggestInputSize(self, out_size):
        # Spatial size is unchanged by this block.
        return out_size
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from torch.legacy.nn import Identity as I
from .sparseModule import SparseModule
class Identity(SparseModule):
    """Pass-through layer: forwards the input unchanged and routes
    gradients straight back."""

    def forward(self, input):
        # No transformation; alias the input as our output.
        self.output = input
        return input

    def backward(self, input, gradOutput, scale=1):
        # Gradient flows through untouched.
        self.gradInput = gradOutput
        return gradOutput

    def clearState(self):
        # Drop tensor references so they can be garbage-collected.
        self.output = None
        self.gradInput = None

    def suggestInputSize(self, out_size):
        # Identity does not change the spatial size.
        return out_size
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
from .metadata import Metadata
from ..utils import toLongTensor, dim_fn
from .sparseConvNetTensor import SparseConvNetTensor
class InputBatch(SparseConvNetTensor):
    """A batch of sparse input samples: a features tensor plus metadata
    recording the active spatial locations."""

    def __init__(self, dimension, spatial_size):
        self.dimension = dimension
        self.spatial_size = toLongTensor(dimension, spatial_size)
        # NOTE: the parent is initialised with the raw (unconverted)
        # spatial_size argument, matching the original behaviour.
        SparseConvNetTensor.__init__(self, None, None, spatial_size)
        self.features = torch.FloatTensor()
        self.metadata = Metadata(dimension)
        dim_fn(dimension, 'setInputSpatialSize')(
            self.metadata.ffi, self.spatial_size)

    def addSample(self):
        # Start a new sample within the batch.
        dim_fn(self.dimension, 'batchAddSample')(self.metadata.ffi)

    def setLocation(self, location, vector, overwrite=False):
        # Bounds-checked variant of setLocation_.
        in_bounds = location.min() >= 0 and \
            (self.spatial_size - location).min() > 0
        assert in_bounds
        self.setLocation_(location, vector, overwrite)

    def setLocation_(self, location, vector, overwrite=False):
        # Unchecked write of a feature vector at a spatial location.
        dim_fn(self.dimension, 'setInputSpatialLocation')(
            self.metadata.ffi, self.features, location, vector, overwrite)

    def addSampleFromTensor(self, tensor, offset, threshold=0):
        # Import a dense tensor, keeping sites whose values exceed the
        # threshold; the kernel reports the resulting active-site count.
        self.nActive = dim_fn(
            self.dimension,
            'addSampleFromThresholdedTensor')(
            self.metadata.ffi,
            self.features,
            tensor,
            offset,
            self.spatial_size,
            threshold)

    def precomputeMetadata(self, stride):
        # Pre-generate stride-2 rule books (2x2 for stride == 2,
        # otherwise 3x3).
        fn_name = 'generateRuleBooks2s2' if stride == 2 \
            else 'generateRuleBooks3s2'
        dim_fn(self.dimension, fn_name)(self.metadata.ffi)

    def __repr__(self):
        return ''.join(['InputBatch<<', repr(self.features),
                        repr(self.metadata), repr(self.spatial_size), '>>'])
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sparseconvnet
from . import SparseModule
from ..utils import toLongTensor, dim_typed_fn, optionalTensor, nullptr, set
from .sparseConvNetTensor import SparseConvNetTensor
class JoinTable(SparseModule):
    """Concatenate a list of sparse tensors along the feature dimension.

    All inputs are assumed to share the same metadata and spatial size;
    the output takes them from the first input.
    """

    def __init__(self, nPlanes):
        SparseModule.__init__(self)
        self.nPlanes = nPlanes
        self.gradInput = [torch.Tensor() for _ in nPlanes]
        self.nOutputPlanes = sum(nPlanes)
        self.output = SparseConvNetTensor(torch.Tensor())

    def updateOutput(self, input):
        first = input[0]
        self.output.features.resize_(
            first.features.size(0), self.nOutputPlanes)
        self.output.metadata = first.metadata
        self.output.spatial_size = first.spatial_size
        col = 0
        for tensor, width in zip(input, self.nPlanes):
            # Copy each input's features into its column slice.
            self.output.features.narrow(1, col, width).copy_(tensor.features)
            col += width
        return self.output

    def updateGradInput(self, input, gradOutput):
        rows = input[0].features.size(0)
        col = 0
        for grad, width in zip(self.gradInput, self.nPlanes):
            # Slice each input's gradient back out of gradOutput.
            grad.resize_(rows, width).copy_(
                gradOutput.narrow(1, col, width))
            col += width
        return self.gradInput

    def type(self, t, tensorCache=None):
        if t:
            self.output.type(t)
            self.gradInput = [g.type(t) for g in self.gradInput]

    def clearState(self):
        set(self.output)
        for g in self.gradInput:
            set(g)

    def __repr__(self):
        plane_list = ' + '.join(str(n) for n in self.nPlanes)
        return 'JoinTable: ' + plane_list + ' -> ' + str(self.nOutputPlanes)
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sparseconvnet
from . import SparseModule
from ..utils import toLongTensor, typed_fn, optionalTensor, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
class LeakyReLU(SparseModule):
    """Pointwise leaky ReLU for sparse tensors: negative features are
    scaled by ``leakage`` instead of being zeroed."""

    def __init__(self, leakage=0.333, ip=True):
        SparseModule.__init__(self)
        self.inplace = ip
        self.leakage = leakage
        self.output = SparseConvNetTensor(torch.Tensor())
        # In-place mode aliases gradOutput at backward time, so no
        # separate gradInput tensor is allocated.
        self.gradInput = None if ip else torch.Tensor()

    def updateOutput(self, input):
        self.output.metadata = input.metadata
        self.output.spatial_size = input.spatial_size
        # NOTE(review): this passes `input` to typed_fn while
        # updateGradInput passes `input.features`; presumably typed_fn
        # accepts either — confirm against utils.typed_fn before changing.
        typed_fn(input, 'LeakyReLU_updateOutput')(
            input.features,
            self.output.features,
            self.leakage)
        return self.output

    def updateGradInput(self, input, gradOutput):
        if self.inplace:
            # Reuse gradOutput's storage for the gradient.
            self.gradInput = gradOutput
        typed_fn(input.features, 'LeakyReLU_updateGradInput')(
            input.features,
            self.gradInput,
            gradOutput,
            self.leakage)
        return self.gradInput

    def type(self, t, tensorCache=None):
        if t:
            self.output.type(t)
            # BUG FIX: gradInput is None in in-place mode (the default);
            # calling .type() on it raised an AttributeError.
            if self.gradInput is not None:
                self.gradInput = self.gradInput.type(t)
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import sparseconvnet
from . import SparseModule
from ..utils import toLongTensor, dim_typed_fn, optionalTensor, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
class MaxPooling(SparseModule):
    """Sparse max pooling layer.

    Forward spatial size: out = (in - pool_size) / pool_stride + 1.
    Optionally drops the first nFeaturesToDrop feature planes.
    """

    def __init__(self, dimension, pool_size, pool_stride, nFeaturesToDrop=0):
        SparseModule.__init__(self)
        self.dimension = dimension
        # pool_size / pool_stride may be scalars or per-dimension values.
        self.pool_size = toLongTensor(dimension, pool_size)
        self.pool_stride = toLongTensor(dimension, pool_stride)
        self.pool_volume = self.pool_size.prod()
        self.nFeaturesToDrop = nFeaturesToDrop or 0
        self.output = SparseConvNetTensor(torch.Tensor())
        self.gradInput = torch.Tensor()

    def updateOutput(self, input):
        self.output.metadata = input.metadata
        self.output.spatial_size =\
            (input.spatial_size - self.pool_size) / self.pool_stride + 1
        dim_typed_fn(self.dimension, input, 'MaxPooling_updateOutput')(
            input.spatial_size,
            self.output.spatial_size,
            self.pool_size,
            self.pool_stride,
            input.metadata.ffi,
            input.features,
            self.output.features,
            self.nFeaturesToDrop,
            torch.cuda.IntTensor() if input.features.is_cuda else nullptr)
        return self.output

    def updateGradInput(self, input, gradOutput):
        # The kernel needs the forward output to route gradients to the
        # argmax locations.
        dim_typed_fn(self.dimension, input, 'MaxPooling_updateGradInput')(
            input.spatial_size,
            self.output.spatial_size,
            self.pool_size,
            self.pool_stride,
            input.metadata.ffi,
            input.features,
            self.gradInput,
            self.output.features,
            gradOutput,
            self.nFeaturesToDrop,
            torch.cuda.IntTensor() if input.features.is_cuda else nullptr)
        return self.gradInput

    def type(self, t=None, tensorCache=None):
        """With no argument, report the current type; otherwise convert
        the state tensors to type ``t``."""
        if t is None:
            return self._type
        # CONSISTENCY FIX: record the new type so a later type() query
        # reports it (matches Deconvolution.type).
        self._type = t
        self.output.type(t)
        self.gradInput = self.gradInput.type(t)

    def __repr__(self):
        s = 'MaxPooling'
        if self.pool_size.max() == self.pool_size.min() and\
                self.pool_stride.max() == self.pool_stride.min():
            # Isotropic pooling: compact "size/stride" form.
            s = s + str(self.pool_size[0]) + '/' + str(self.pool_stride[0])
        else:
            s = s + '(' + str(self.pool_size[0])
            for i in self.pool_size[1:]:
                s = s + ',' + str(i)
            s = s + ')/(' + str(self.pool_stride[0])
            for i in self.pool_stride[1:]:
                s = s + ',' + str(i)
            s = s + ')'
        if self.nFeaturesToDrop > 0:
            # BUG FIX: the int must be converted before concatenation;
            # str + int raised a TypeError here.
            s = s + ' nFeaturesToDrop = ' + str(self.nFeaturesToDrop)
        return s

    def suggestInputSize(self, out_size):
        # Inverse of the forward size formula.
        return (out_size - 1) * self.pool_stride + self.pool_size
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment