Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
SparseConvNet
Commits
df96d0c0
Commit
df96d0c0
authored
Sep 14, 2017
by
Ed Ng
Browse files
Create BatchwiseDropout Layer
Update GPU Header
parent
6de372c3
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
101 additions
and
12 deletions
+101
-12
PyTorch/sparseconvnet/SCN/header_cpu.h
PyTorch/sparseconvnet/SCN/header_cpu.h
+4
-8
PyTorch/sparseconvnet/SCN/header_gpu.h
PyTorch/sparseconvnet/SCN/header_gpu.h
+2
-4
PyTorch/sparseconvnet/legacy/__init__.py
PyTorch/sparseconvnet/legacy/__init__.py
+1
-0
PyTorch/sparseconvnet/legacy/batchwiseDropout.py
PyTorch/sparseconvnet/legacy/batchwiseDropout.py
+94
-0
No files found.
PyTorch/sparseconvnet/SCN/header_cpu.h
View file @
df96d0c0
...
@@ -190,12 +190,10 @@ void scn_cpu_float_AffineReluTrivialConvolution_backward(
...
@@ -190,12 +190,10 @@ void scn_cpu_float_AffineReluTrivialConvolution_backward(
// BatchwiseMultiplicativeDropout (CPU, float)
// Forward: scales input_features by per-plane noise; alpha is the leakiness
// applied where noise is zero.
void scn_cpu_float_BatchwiseMultiplicativeDropout_updateOutput(
    THFloatTensor *input_features,
    THFloatTensor *output_features,
    THFloatTensor *noise,
    float alpha);
// Backward: propagates d_output_features through the same noise mask.
void scn_cpu_float_BatchwiseMultiplicativeDropout_updateGradInput(
    THFloatTensor *input_features,
    THFloatTensor *d_input_features,
    THFloatTensor *d_output_features,
    THFloatTensor *noise,
    float alpha);
// BatchNormalization
// BatchNormalization
void
scn_cpu_float_BatchNormalization_updateOutput
(
void
scn_cpu_float_BatchNormalization_updateOutput
(
...
@@ -246,12 +244,10 @@ void scn_cpu_double_AffineReluTrivialConvolution_backward(
...
@@ -246,12 +244,10 @@ void scn_cpu_double_AffineReluTrivialConvolution_backward(
// BatchwiseMultiplicativeDropout (CPU, double)
// Forward: scales input_features by per-plane noise; alpha is the leakiness
// applied where noise is zero.
void scn_cpu_double_BatchwiseMultiplicativeDropout_updateOutput(
    THDoubleTensor *input_features,
    THDoubleTensor *output_features,
    THDoubleTensor *noise,
    float alpha);
// Backward: propagates d_output_features through the same noise mask.
void scn_cpu_double_BatchwiseMultiplicativeDropout_updateGradInput(
    THDoubleTensor *input_features,
    THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features,
    THDoubleTensor *noise,
    float alpha);
// BatchNormalization
// BatchNormalization
void
scn_cpu_double_BatchNormalization_updateOutput
(
void
scn_cpu_double_BatchNormalization_updateOutput
(
...
...
PyTorch/sparseconvnet/SCN/header_gpu.h
View file @
df96d0c0
...
@@ -16,12 +16,10 @@ void scn_gpu_float_AffineReluTrivialConvolution_backward(
...
@@ -16,12 +16,10 @@ void scn_gpu_float_AffineReluTrivialConvolution_backward(
// BatchwiseMultiplicativeDropout (GPU, float)
// Forward: scales input_features by per-plane noise; alpha is the leakiness
// applied where noise is zero.
void scn_gpu_float_BatchwiseMultiplicativeDropout_updateOutput(
    THCudaTensor *input_features,
    THCudaTensor *output_features,
    THCudaTensor *noise,
    float alpha);
// Backward: propagates d_output_features through the same noise mask.
void scn_gpu_float_BatchwiseMultiplicativeDropout_updateGradInput(
    THCudaTensor *input_features,
    THCudaTensor *d_input_features,
    THCudaTensor *d_output_features,
    THCudaTensor *noise,
    float alpha);
// BatchNormalization
// BatchNormalization
void
scn_gpu_float_BatchNormalization_updateOutput
(
void
scn_gpu_float_BatchNormalization_updateOutput
(
...
...
PyTorch/sparseconvnet/legacy/__init__.py
View file @
df96d0c0
...
@@ -11,6 +11,7 @@ from .sparseConvNetTensor import SparseConvNetTensor
...
@@ -11,6 +11,7 @@ from .sparseConvNetTensor import SparseConvNetTensor
from
.sparseModule
import
SparseModule
from
.sparseModule
import
SparseModule
from
.averagePooling
import
AveragePooling
from
.averagePooling
import
AveragePooling
from
.batchNormalization
import
BatchNormalization
from
.batchNormalization
import
BatchNormalization
from
.batchwiseDropout
import
BatchwiseDropout
from
.concatTable
import
ConcatTable
from
.concatTable
import
ConcatTable
from
.convolution
import
Convolution
from
.convolution
import
Convolution
from
.cAddTable
import
CAddTable
from
.cAddTable
import
CAddTable
...
...
PyTorch/sparseconvnet/legacy/batchwiseDropout.py
0 → 100644
View file @
df96d0c0
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Implementation of batchwise dropout, optionally followed by LeakyReLU
Parameters:
nPlanes: number of input planes
p : dropout probability in the range [0,1]
ip : perform dropout in-place (default True)
leaky : in the range [0,1]. Set to zero to do ReLU after the dropout. Set to one
just to do dropout. Set to 1/3 for LeakyReLU after the dropout, etc. (default 1)
"""
import
torch
import
sparseconvnet
from
.
import
SparseModule
from
..utils
import
toLongTensor
,
typed_fn
from
.sparseConvNetTensor
import
SparseConvNetTensor
class BatchwiseDropout(SparseModule):
    """Batchwise (per-plane) multiplicative dropout, optionally with LeakyReLU.

    One Bernoulli mask of size nPlanes is drawn per forward pass and applied
    to every site in the batch. At evaluation time the mask is replaced by
    its expectation (1 - p).

    Parameters:
        nPlanes: number of input feature planes
        p: dropout probability in [0, 1]
        ip: perform dropout in-place (default True)
        leaky: leakiness in [0, 1]; 0 -> ReLU after dropout, 1 -> dropout
            only (default 1)
    """
    # NOTE(review): __init__ does not call SparseModule.__init__; preserved
    # as-is since sibling legacy modules may rely on the same convention —
    # confirm against SparseModule.

    def __init__(self, nPlanes, p, ip=True, leaky=1):
        # Store nPlanes: __repr__ reads self.nPlanes, which the original
        # never set (AttributeError on repr()).
        self.nPlanes = nPlanes
        self.inplace = ip
        self.p = p
        self.leakiness = leaky
        self.noise = torch.Tensor(nPlanes)
        self.output = None if ip else SparseConvNetTensor(torch.Tensor())
        self.gradInput = None if ip else torch.Tensor()

    def updateOutput(self, input):
        # Fresh Bernoulli mask while training; expected value when evaluating.
        if self.train:
            self.noise.bernoulli_(1 - self.p)
        else:
            self.noise.fill_(1 - self.p)
        if self.inplace:
            self.output = input
        else:
            # Share metadata/spatialSize; only the features tensor differs.
            self.output.metadata = input.metadata
            self.output.spatialSize = input.spatialSize
        typed_fn(input, 'BatchwiseMultiplicativeDropout_updateOutput')(
            input.features,
            self.output.features,
            self.noise,
            self.leakiness)
        return self.output

    def updateGradInput(self, input, gradOutput):
        if self.inplace:
            self.gradInput = gradOutput
        typed_fn(input, 'BatchwiseMultiplicativeDropout_updateGradInput')(
            input.features,
            self.gradInput,
            gradOutput,
            self.noise,
            self.leakiness)

    def type(self, t, tensorCache=None):
        # Tensor.type(t) returns a converted copy; the original discarded the
        # result, so the tensors were never actually converted. Reassign.
        self.noise = self.noise.type(t)
        if not self.inplace:
            self.output.features = self.output.features.type(t)
            # gradInput is a plain torch.Tensor (see __init__); the original
            # accessed self.gradInput.features, which does not exist.
            self.gradInput = self.gradInput.type(t)
        SparseModule.type(self, t, tensorCache)

    def clearState(self):
        # Fixed: original tested self.inPlace (attribute is self.inplace) and
        # cleared self.gradOutput, which is never set — gradInput is the
        # reference held in in-place mode.
        if self.inplace:
            self.output = None
            self.gradInput = None
        else:
            SparseModule.clearState(self)

    def __repr__(self):
        s = 'BatchwiseDropout(' + str(self.nPlanes) + ',p=' + str(self.p) + \
            ',ip=' + str(self.inplace)
        if self.leakiness > 0:
            s = s + ',leakiness=' + str(self.leakiness)
        s = s + ')'
        return s
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment