Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
SparseConvNet
Commits
209e1e78
Commit
209e1e78
authored
Oct 03, 2017
by
Benjamin Thomas Graham
Browse files
Merge branch 'master' of github.com:facebookresearch/SparseConvNet
parents
d4a8a1fb
82bcd1db
Changes
3
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
56 additions
and
3 deletions
+56
-3
PyTorch/sparseconvnet/legacy/__init__.py
PyTorch/sparseconvnet/legacy/__init__.py
+1
-1
PyTorch/sparseconvnet/legacy/batchwiseDropout.py
PyTorch/sparseconvnet/legacy/batchwiseDropout.py
+54
-1
PyTorch/sparseconvnet/legacy/validConvolution.py
PyTorch/sparseconvnet/legacy/validConvolution.py
+1
-1
No files found.
PyTorch/sparseconvnet/legacy/__init__.py
View file @
209e1e78
...
@@ -11,7 +11,7 @@ from ..sparseConvNetTensor import SparseConvNetTensor
...
@@ -11,7 +11,7 @@ from ..sparseConvNetTensor import SparseConvNetTensor
from
.sparseModule
import
SparseModule
from
.sparseModule
import
SparseModule
from
.averagePooling
import
AveragePooling
from
.averagePooling
import
AveragePooling
from
.batchNormalization
import
BatchNormalization
,
BatchNormReLU
,
BatchNormLeakyReLU
,
BatchNormalizationInTensor
from
.batchNormalization
import
BatchNormalization
,
BatchNormReLU
,
BatchNormLeakyReLU
,
BatchNormalizationInTensor
from
.batchwiseDropout
import
BatchwiseDropout
from
.batchwiseDropout
import
BatchwiseDropout
,
BatchwiseDropoutInTensor
from
.concatTable
import
ConcatTable
from
.concatTable
import
ConcatTable
from
.convolution
import
Convolution
from
.convolution
import
Convolution
from
.cAddTable
import
CAddTable
from
.cAddTable
import
CAddTable
...
...
PyTorch/sparseconvnet/legacy/batchwiseDropout.py
View file @
209e1e78
...
@@ -33,6 +33,7 @@ class BatchwiseDropout(SparseModule):
...
@@ -33,6 +33,7 @@ class BatchwiseDropout(SparseModule):
self
.
p
=
p
self
.
p
=
p
self
.
leakiness
=
leaky
self
.
leakiness
=
leaky
self
.
noise
=
torch
.
Tensor
(
nPlanes
)
self
.
noise
=
torch
.
Tensor
(
nPlanes
)
self
.
nPlanes
=
nPlanes
self
.
output
=
None
if
ip
else
SparseConvNetTensor
(
torch
.
Tensor
())
self
.
output
=
None
if
ip
else
SparseConvNetTensor
(
torch
.
Tensor
())
self
.
gradInput
=
None
if
ip
else
torch
.
Tensor
()
self
.
gradInput
=
None
if
ip
else
torch
.
Tensor
()
...
@@ -74,7 +75,7 @@ class BatchwiseDropout(SparseModule):
...
@@ -74,7 +75,7 @@ class BatchwiseDropout(SparseModule):
if
not
self
.
inplace
:
if
not
self
.
inplace
:
self
.
output
.
features
.
type
(
t
)
self
.
output
.
features
.
type
(
t
)
self
.
gradInput
.
features
.
type
(
t
)
self
.
gradInput
.
type
(
t
)
SparseModule
.
type
(
self
,
t
,
tensorCache
)
SparseModule
.
type
(
self
,
t
,
tensorCache
)
...
@@ -92,3 +93,55 @@ class BatchwiseDropout(SparseModule):
...
@@ -92,3 +93,55 @@ class BatchwiseDropout(SparseModule):
s
=
s
+
',leakiness='
+
str
(
self
.
leakiness
)
s
=
s
+
',leakiness='
+
str
(
self
.
leakiness
)
s
=
s
+
')'
s
=
s
+
')'
return
s
return
s
class BatchwiseDropoutInTensor(BatchwiseDropout):
    """Batchwise multiplicative dropout whose output is written into a
    column slice of a pre-existing feature tensor.

    Behaves like ``BatchwiseDropout`` but instead of producing a
    stand-alone output tensor it narrows ``self.output.features`` to the
    columns ``[output_column_offset, output_column_offset + nPlanes)``
    and writes the result there.

    NOTE(review): reconstructed from a token-mangled diff view of commit
    209e1e78 — confirm formatting against the upstream file.
    """

    def __init__(self, nPlanes, p, output_column_offset=0, leaky=1):
        # ip=False: in-tensor output can never be in-place, since the
        # destination is a slice of a (presumably larger) shared tensor.
        BatchwiseDropout.__init__(self, nPlanes, p, False, leaky)
        # First column of the destination tensor this module writes to.
        self.output_column_offset = output_column_offset

    def updateOutput(self, input):
        # Training: sample a per-plane Bernoulli keep mask with
        # probability (1 - p). Evaluation: deterministically scale every
        # plane by the keep probability instead.
        if self.train:
            self.noise.bernoulli_(1 - self.p)
        else:
            self.noise.fill_(1 - self.p)
        self.output.metadata = input.metadata
        self.output.spatial_size = input.spatial_size
        # View (no copy) of the destination columns; assumes
        # self.output.features is already allocated wide enough to hold
        # output_column_offset + nPlanes columns — TODO confirm caller.
        o = self.output.features.narrow(1, self.output_column_offset, self.nPlanes)
        # Dispatch to the type-specialized C implementation; writes into
        # `o` in place using the sampled noise and leakiness.
        typed_fn(input.features, 'BatchwiseMultiplicativeDropout_updateOutput')(
            input.features,
            o,
            self.noise,
            self.leakiness)
        return self.output

    def updateGradInput(self, input, gradOutput):
        # Gradients are only meaningful for the sampled (training-mode)
        # mask; evaluation mode never back-propagates through this path.
        assert self.train
        # Gradient slice corresponding to the columns we wrote.
        d_o = gradOutput.narrow(1, self.output_column_offset, self.nPlanes)
        # C kernel fills self.gradInput in place from the sliced
        # upstream gradient and the stored noise mask.
        typed_fn(input.features, 'BatchwiseMultiplicativeDropout_updateGradInput')(
            input.features,
            self.gradInput,
            d_o,
            self.noise,
            self.leakiness)
        return self.gradInput

    def __repr__(self):
        # Human-readable summary; leakiness is shown only when nonzero.
        s = 'BatchwiseDropoutInTensor(' + str(self.nPlanes) + ',p=' + str(self.p) + \
            ',column_offset=' + str(self.output_column_offset)
        if self.leakiness > 0:
            s = s + ',leakiness=' + str(self.leakiness)
        s = s + ')'
        return s
PyTorch/sparseconvnet/legacy/validConvolution.py
View file @
209e1e78
...
@@ -10,7 +10,6 @@ from . import SparseModule
...
@@ -10,7 +10,6 @@ from . import SparseModule
from
..utils
import
toLongTensor
,
dim_typed_fn
,
optionalTensor
,
nullptr
from
..utils
import
toLongTensor
,
dim_typed_fn
,
optionalTensor
,
nullptr
from
..sparseConvNetTensor
import
SparseConvNetTensor
from
..sparseConvNetTensor
import
SparseConvNetTensor
class
ValidConvolution
(
SparseModule
):
class
ValidConvolution
(
SparseModule
):
def
__init__
(
self
,
dimension
,
nIn
,
nOut
,
filter_size
,
bias
):
def
__init__
(
self
,
dimension
,
nIn
,
nOut
,
filter_size
,
bias
):
SparseModule
.
__init__
(
self
)
SparseModule
.
__init__
(
self
)
...
@@ -51,6 +50,7 @@ class ValidConvolution(SparseModule):
...
@@ -51,6 +50,7 @@ class ValidConvolution(SparseModule):
def
backward
(
self
,
input
,
gradOutput
,
scale
=
1
):
def
backward
(
self
,
input
,
gradOutput
,
scale
=
1
):
assert
scale
==
1
assert
scale
==
1
dim_typed_fn
(
self
.
dimension
,
input
.
features
,
'ValidConvolution_backward'
)(
dim_typed_fn
(
self
.
dimension
,
input
.
features
,
'ValidConvolution_backward'
)(
input
.
spatial_size
,
input
.
spatial_size
,
self
.
filter_size
,
self
.
filter_size
,
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment