Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
SparseConvNet
Commits
7470fd58
Commit
7470fd58
authored
Sep 15, 2017
by
Ben Graham
Committed by
GitHub
Sep 15, 2017
Browse files
Merge pull request #6 from gnedster/batchwise_dropout
Create BatchwiseDropout Layer
parents
c8e8db32
c74697aa
Changes
4
Hide whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
101 additions
and
12 deletions
+101
-12
PyTorch/sparseconvnet/SCN/header_cpu.h
PyTorch/sparseconvnet/SCN/header_cpu.h
+4
-8
PyTorch/sparseconvnet/SCN/header_gpu.h
PyTorch/sparseconvnet/SCN/header_gpu.h
+2
-4
PyTorch/sparseconvnet/legacy/__init__.py
PyTorch/sparseconvnet/legacy/__init__.py
+1
-0
PyTorch/sparseconvnet/legacy/batchwiseDropout.py
PyTorch/sparseconvnet/legacy/batchwiseDropout.py
+94
-0
No files found.
PyTorch/sparseconvnet/SCN/header_cpu.h
View file @
7470fd58
...
...
@@ -190,12 +190,10 @@ void scn_cpu_float_AffineReluTrivialConvolution_backward(
// BatchwiseMultiplicativeDropout
// Forward pass: scales each plane of input_features by the per-plane noise
// vector, writing the result to output_features (post-commit signature: the
// plane count and strides are now derived from the tensors themselves).
void scn_cpu_float_BatchwiseMultiplicativeDropout_updateOutput(
    THFloatTensor *input_features,
    THFloatTensor *output_features,
    THFloatTensor *noise,
    float alpha);
// Backward pass: propagates d_output_features to d_input_features using the
// same per-plane noise; alpha is the leakiness of the fused (Leaky)ReLU.
void scn_cpu_float_BatchwiseMultiplicativeDropout_updateGradInput(
    THFloatTensor *input_features,
    THFloatTensor *d_input_features,
    THFloatTensor *d_output_features,
    THFloatTensor *noise,
    float alpha);
// BatchNormalization
void
scn_cpu_float_BatchNormalization_updateOutput
(
...
...
@@ -246,12 +244,10 @@ void scn_cpu_double_AffineReluTrivialConvolution_backward(
// BatchwiseMultiplicativeDropout
// Double-precision counterparts of the float CPU declarations: per-plane
// multiplicative dropout forward/backward with fused leaky ReLU (alpha).
void scn_cpu_double_BatchwiseMultiplicativeDropout_updateOutput(
    THDoubleTensor *input_features,
    THDoubleTensor *output_features,
    THDoubleTensor *noise,
    float alpha);
void scn_cpu_double_BatchwiseMultiplicativeDropout_updateGradInput(
    THDoubleTensor *input_features,
    THDoubleTensor *d_input_features,
    THDoubleTensor *d_output_features,
    THDoubleTensor *noise,
    float alpha);
// BatchNormalization
void
scn_cpu_double_BatchNormalization_updateOutput
(
...
...
PyTorch/sparseconvnet/SCN/header_gpu.h
View file @
7470fd58
...
...
@@ -16,12 +16,10 @@ void scn_gpu_float_AffineReluTrivialConvolution_backward(
// BatchwiseMultiplicativeDropout
// CUDA counterparts of the CPU declarations: per-plane multiplicative
// dropout forward/backward with fused leaky ReLU (alpha).
void scn_gpu_float_BatchwiseMultiplicativeDropout_updateOutput(
    THCudaTensor *input_features,
    THCudaTensor *output_features,
    THCudaTensor *noise,
    float alpha);
void scn_gpu_float_BatchwiseMultiplicativeDropout_updateGradInput(
    THCudaTensor *input_features,
    THCudaTensor *d_input_features,
    THCudaTensor *d_output_features,
    THCudaTensor *noise,
    float alpha);
// BatchNormalization
void
scn_gpu_float_BatchNormalization_updateOutput
(
...
...
PyTorch/sparseconvnet/legacy/__init__.py
View file @
7470fd58
...
...
@@ -11,6 +11,7 @@ from .sparseConvNetTensor import SparseConvNetTensor
from
.sparseModule
import
SparseModule
from
.averagePooling
import
AveragePooling
from
.batchNormalization
import
BatchNormalization
,
BatchNormReLU
,
BatchNormLeakyReLU
,
BatchNormalizationInTensor
from
.batchwiseDropout
import
BatchwiseDropout
from
.concatTable
import
ConcatTable
from
.convolution
import
Convolution
from
.cAddTable
import
CAddTable
...
...
PyTorch/sparseconvnet/legacy/batchwiseDropout.py
0 → 100644
View file @
7470fd58
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Implementation of batchwise dropout, optionally followed by LeakyReLU
Parameters:
nPlanes: number of input planes
p : dropout probability in the range [0,1]
ip : perform dropout inplace (default true)
leaky : in the range [0,1]. Set to zero to do ReLU after the dropout. Set to one
just to do dropout. Set to 1/3 for LeakyReLU after the dropout, etc. (default 1)
"""
import
torch
import
sparseconvnet
from
.
import
SparseModule
from
..utils
import
toLongTensor
,
typed_fn
from
.sparseConvNetTensor
import
SparseConvNetTensor
class BatchwiseDropout(SparseModule):
    """Batchwise multiplicative dropout, optionally followed by a (Leaky)ReLU.

    A single noise vector with one entry per plane is applied to every item
    in the batch ("batchwise" dropout).  During training the noise is drawn
    from Bernoulli(1 - p); during evaluation every plane is scaled by the
    constant (1 - p) so expected activations match training.

    Parameters:
        nPlanes: number of input planes
        p: dropout probability in the range [0, 1]
        ip: perform dropout in-place (default True)
        leaky: leakiness of the ReLU applied after the dropout: 0 gives a
            ReLU, 1 disables the nonlinearity (dropout only), 1/3 gives a
            LeakyReLU, etc. (default 1)
    """

    def __init__(self, nPlanes, p, ip=True, leaky=1):
        # NOTE(review): SparseModule.__init__ is not invoked here -- confirm
        # the base class requires no per-instance setup.
        self.nPlanes = nPlanes  # fix: was never stored, but __repr__ reads it
        self.inplace = ip
        self.p = p
        self.leakiness = leaky
        self.noise = torch.Tensor(nPlanes)
        # In-place mode reuses the caller's input / gradOutput tensors, so no
        # output/gradInput storage is allocated.
        self.output = None if ip else SparseConvNetTensor(torch.Tensor())
        self.gradInput = None if ip else torch.Tensor()

    def updateOutput(self, input):
        """Apply dropout (and the optional leaky ReLU) to input.features."""
        if self.train:
            # Fresh Bernoulli mask on every forward pass while training.
            self.noise.bernoulli_(1 - self.p)
        else:
            # Deterministic rescaling at evaluation time.
            self.noise.fill_(1 - self.p)
        if self.inplace:
            self.output = input
        else:
            self.output.metadata = input.metadata
            self.output.spatialSize = input.spatialSize
        typed_fn(input.features, 'BatchwiseMultiplicativeDropout_updateOutput')(
            input.features,
            self.output.features,
            self.noise,
            self.leakiness)
        return self.output

    def updateGradInput(self, input, gradOutput):
        """Backpropagate through the dropout/LeakyReLU using the stored noise."""
        if self.inplace:
            self.gradInput = gradOutput
        typed_fn(input.features,
                 'BatchwiseMultiplicativeDropout_updateGradInput')(
            input.features,
            self.gradInput,
            gradOutput,
            self.noise,
            self.leakiness)
        # Returning gradInput matches the legacy updateGradInput convention;
        # callers that ignored the (previously absent) return are unaffected.
        return self.gradInput

    def type(self, t, tensorCache=None):
        """Convert the module's tensors to type t."""
        # fix: Tensor.type() returns a converted copy rather than mutating in
        # place; the original discarded the results, leaving the module
        # unconverted.
        self.noise = self.noise.type(t)
        if not self.inplace:
            self.output.features = self.output.features.type(t)
            # fix: gradInput is a plain tensor; the original accessed a
            # nonexistent .features attribute on it.
            self.gradInput = self.gradInput.type(t)
        SparseModule.type(self, t, tensorCache)

    def clearState(self):
        """Release cached tensors; in-place mode only drops the references."""
        if self.inplace:  # fix: was self.inPlace (AttributeError)
            self.output = None
            self.gradInput = None  # fix: was setting an unused self.gradOutput
        else:
            SparseModule.clearState(self)

    def __repr__(self):
        s = 'BatchwiseDropout(' + str(self.nPlanes) + ',p=' + str(self.p) + \
            ',ip=' + str(self.inplace)
        if self.leakiness > 0:
            s = s + ',leakiness=' + str(self.leakiness)
        s = s + ')'
        return s
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment