OpenDAS / SparseConvNet / Commits / 3ff930b7

Commit 3ff930b7, authored Sep 14, 2017 by Ed Ng, committed by GitHub on Sep 14, 2017

    Merge branch 'master' into batchwise_dropout

Parents: df96d0c0, c8e8db32

Changes: 39. Showing 19 changed files with 243 additions and 86 deletions (+243 -86).
Files changed on this page:

  PyTorch/sparseconvnet/legacy/__init__.py                       +3   -1
  PyTorch/sparseconvnet/legacy/affineReLUTrivialConvolution.py   +6   -8
  PyTorch/sparseconvnet/legacy/averagePooling.py                 +2   -3
  PyTorch/sparseconvnet/legacy/batchNormalization.py             +14  -9
  PyTorch/sparseconvnet/legacy/convolution.py                    +12  -7
  PyTorch/sparseconvnet/legacy/deconvolution.py                  +8   -6
  PyTorch/sparseconvnet/legacy/denseToSparse.py (new file)       +59  -0
  PyTorch/sparseconvnet/legacy/leakyReLU.py                      +1   -1
  PyTorch/sparseconvnet/legacy/maxPooling.py                     +2   -2
  PyTorch/sparseconvnet/legacy/metadata.py                       +4   -4
  PyTorch/sparseconvnet/legacy/misc.py (new file)                +61  -0
  PyTorch/sparseconvnet/legacy/networkArchitectures.py           +1   -1
  PyTorch/sparseconvnet/legacy/networkInNetwork.py               +8   -7
  PyTorch/sparseconvnet/legacy/sparseConvNetTensor.py            +1   -1
  PyTorch/sparseconvnet/legacy/sparseToDense.py                  +8   -4
  PyTorch/sparseconvnet/legacy/validConvolution.py               +8   -6
  PyTorch/sparseconvnet/utils.py                                 +4   -6
  Torch/C.lua                                                    +30  -11
  Torch/SparseToDense.lua                                        +11  -9
PyTorch/sparseconvnet/legacy/__init__.py

@@ -10,12 +10,13 @@ from .inputBatch import InputBatch
 from .sparseConvNetTensor import SparseConvNetTensor
 from .sparseModule import SparseModule
 from .averagePooling import AveragePooling
-from .batchNormalization import BatchNormalization
+from .batchNormalization import BatchNormalization, BatchNormReLU, BatchNormLeakyReLU, BatchNormalizationInTensor
 from .batchwiseDropout import BatchwiseDropout
 from .concatTable import ConcatTable
 from .convolution import Convolution
 from .cAddTable import CAddTable
 from .deconvolution import Deconvolution
+from .denseToSparse import DenseToSparse
 from .identity import Identity
 from .joinTable import JoinTable
 from .leakyReLU import LeakyReLU
@@ -27,3 +28,4 @@ from .sparseToDense import SparseToDense
 from .validConvolution import ValidConvolution
 from .networkArchitectures import *
 from .classificationTrainValidate import ClassificationTrainValidate
+from .misc import *
PyTorch/sparseconvnet/legacy/affineReLUTrivialConvolution.py

@@ -28,13 +28,11 @@ class AffineReLUTrivialConvolution(SparseModule):
         self.nOut = nOut
         self.affineWeight = torch.Tensor(nIn).fill_(1)
         self.affineBias = torch.Tensor(nIn).zero_()
-        self.convWeight = torch.Tensor(nIn, nOut).normal_(0, math.sqrt(2.0 / nIn))
+        std = math.sqrt(2.0 / nIn)
+        self.convWeight = torch.Tensor(nIn, nOut).normal_(0, std)
         self.gradAffineWeight = torch.Tensor(nIn).fill_(0)
-        self.gradAffineBias = torch.Tensor(nIn).zero_()
-        self.gradConvWeight = torch.Tensor(nIn, nOut).zero_()
+        self.gradAffineBias = torch.Tensor(nIn).fill_(0.333)
+        self.gradConvWeight = torch.Tensor(nIn, nOut).fill_(std)
         self.additiveGrad = additiveGrad
         self.output = SparseConvNetTensor(torch.Tensor())
         self.gradInput = torch.Tensor()
@@ -46,7 +44,7 @@ class AffineReLUTrivialConvolution(SparseModule):
     def updateOutput(self, input):
         self.output.metadata = input.metadata
         self.output.spatial_size = input.spatial_size
-        typed_fn(input, 'AffineReluTrivialConvolution_updateOutput')(
+        typed_fn(input.features, 'AffineReluTrivialConvolution_updateOutput')(
             input.features,
             self.output.features,
             self.affineWeight,
@@ -59,7 +57,7 @@ class AffineReLUTrivialConvolution(SparseModule):
     def backward(self, input, gradOutput, scale=1):
         assert scale == 1
-        typed_fn(input, 'AffineReluTrivialConvolution_backward')(
+        typed_fn(input.features, 'AffineReluTrivialConvolution_backward')(
             input.features,
             self.gradInput,
             gradOutput,
PyTorch/sparseconvnet/legacy/averagePooling.py

@@ -17,7 +17,6 @@ class AveragePooling(SparseModule):
         self.dimension = dimension
         self.pool_size = toLongTensor(dimension, pool_size)
         self.pool_stride = toLongTensor(dimension, pool_stride)
-        self.pool_volume = self.pool_size.prod()
         self.nFeaturesToDrop = nFeaturesToDrop
         self.output = SparseConvNetTensor(torch.Tensor())
         self.gradInput = torch.Tensor()
@@ -26,7 +25,7 @@ class AveragePooling(SparseModule):
         self.output.metadata = input.metadata
         self.output.spatial_size = \
             (input.spatial_size - self.pool_size) / self.pool_stride + 1
-        dim_typed_fn(self.dimension, input, 'AveragePooling_updateOutput')(
+        dim_typed_fn(self.dimension, input.features, 'AveragePooling_updateOutput')(
             input.spatial_size,
             self.output.spatial_size,
             self.pool_size,
@@ -40,7 +39,7 @@ class AveragePooling(SparseModule):
     def updateGradInput(self, input, gradOutput):
-        dim_typed_fn(self.dimension, input, 'AveragePooling_updateGradInput')(
+        dim_typed_fn(self.dimension, input.features, 'AveragePooling_updateGradInput')(
            input.spatial_size,
            self.output.spatial_size,
            self.pool_size,
PyTorch/sparseconvnet/legacy/batchNormalization.py

@@ -21,7 +21,6 @@ from . import SparseModule
 from ..utils import toLongTensor, typed_fn, optionalTensor, nullptr
 from .sparseConvNetTensor import SparseConvNetTensor

 class BatchNormalization(SparseModule):
     def __init__(self,
@@ -31,7 +30,6 @@ class BatchNormalization(SparseModule):
                  affine=True,
                  leakiness=1):
         SparseModule.__init__(self)
-        assert nPlanes % 4 == 0
         self.nPlanes = nPlanes
         self.eps = eps
         self.momentum = momentum
@@ -44,16 +42,16 @@ class BatchNormalization(SparseModule):
         if affine:
             self.weight = torch.Tensor(nPlanes).fill_(1)
             self.bias = torch.Tensor(nPlanes).fill_(0)
-            self.gradWeight = torch.Tensor(nPlanes)
-            self.gradBias = torch.Tensor(nPlanes)
+            self.gradWeight = torch.Tensor(nPlanes).fill_(0)
+            self.gradBias = torch.Tensor(nPlanes).fill_(0.333)
         self.output = SparseConvNetTensor(torch.Tensor())
         self.gradInput = torch.Tensor()

     def updateOutput(self, input):
-        assert input.features.size(1) == self.nPlanes
+        assert input.features.ndimension() == 0 or input.features.size(1) == self.nPlanes
         self.output.metadata = input.metadata
         self.output.spatial_size = input.spatial_size
-        typed_fn(input, 'BatchNormalization_updateOutput')(
+        typed_fn(input.features, 'BatchNormalization_updateOutput')(
             input.features,
             self.output.features,
             self.saveMean,
@@ -71,7 +69,7 @@ class BatchNormalization(SparseModule):
     def backward(self, input, gradOutput, scale=1):
         assert scale == 1
         assert self.train
-        typed_fn(input, 'BatchNormalization_backward')(
+        typed_fn(input.features, 'BatchNormalization_backward')(
             input.features,
             self.gradInput,
             self.output.features,
@@ -114,7 +112,14 @@ class BatchNormReLU(BatchNormalization):
         s = 'BatchNormReLU(' + str(self.nPlanes) + ',eps=' + str(self.eps) + \
             ',momentum=' + str(self.momentum) + ',affine=' + str(self.affine) + ')'
         return s

+class BatchNormLeakyReLU(BatchNormalization):
+    def __init__(self, nPlanes, eps=1e-4, momentum=0.9):
+        BatchNormalization.__init__(self, nPlanes, eps, momentum, True, 0.333)
+
+    def __repr__(self):
+        s = 'BatchNormReLU(' + str(self.nPlanes) + ',eps=' + str(self.eps) + \
+            ',momentum=' + str(self.momentum) + ',affine=' + str(self.affine) + ')'
+        return s
+
 class BatchNormalizationInTensor(BatchNormalization):
     def __init__(
@@ -131,7 +136,7 @@ class BatchNormalizationInTensor(BatchNormalization):
             1, self.output_column_offset, self.nPlanes)
         self.output.metadata = input.metadata
         self.output.spatial_size = input.spatial_size
-        typed_fn(input, 'BatchNormalizationInTensor_updateOutput')(
+        typed_fn(input.features, 'BatchNormalizationInTensor_updateOutput')(
             input.features,
             o,
             self.saveMean,
@@ -152,7 +157,7 @@ class BatchNormalizationInTensor(BatchNormalization):
         o = self.output.features.narrow(1, self.output_column_offset, self.nPlanes)
         d_o = gradOutput.narrow(1, self.output_column_offset, self.nPlanes)
-        typed_fn(input, 'BatchNormalization_backward')(
+        typed_fn(input.features, 'BatchNormalization_backward')(
             input.features,
             self.gradInput,
             o,
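The new BatchNormLeakyReLU class only changes the leakiness argument passed to BatchNormalization.__init__ (affine=True, leakiness=0.333). For reference, a descriptive sketch of the elementwise nonlinearity that a leakiness value selects follows; this is not code from the diff, the real computation lives in the compiled C/CUDA functions that typed_fn dispatches to:

    def fused_nonlinearity(x, leakiness):
        # leakiness = 1 leaves the value unchanged (the plain BatchNormalization default above)
        # leakiness = 0.333 is the default chosen by the new BatchNormLeakyReLU class
        # smaller values approach an ordinary ReLU
        return x if x > 0 else leakiness * x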
PyTorch/sparseconvnet/legacy/convolution.py

@@ -20,24 +20,29 @@ class Convolution(SparseModule):
         self.filter_size = toLongTensor(dimension, filter_size)
         self.filter_volume = self.filter_size.prod()
         self.filter_stride = toLongTensor(dimension, filter_stride)
-        self.weight = torch.Tensor(nIn * self.filter_volume, nOut).normal_(0, (2.0 / nIn / self.filter_volume)**0.5)
-        self.gradWeight = torch.Tensor(nIn * self.filter_volume, nOut)
+        std = (2.0 / nIn / self.filter_volume)**0.5
+        self.weight = torch.Tensor(nIn * self.filter_volume, nOut).normal_(0, std)
+        self.gradWeight = torch.Tensor(nIn * self.filter_volume, nOut).fill_(std)
         if bias:
             self.bias = torch.Tensor(nOut).zero_()
-            self.gradBias = torch.Tensor(nOut)
+            self.gradBias = torch.Tensor(nOut).zero_()
         self.output = SparseConvNetTensor(torch.Tensor())
         self.gradInput = torch.Tensor()

     def updateOutput(self, input):
-        assert input.features.size(1) == self.nIn
+        assert input.features.ndimension() == 0 or input.features.size(1) == self.nIn
         self.output.metadata = input.metadata
         self.output.spatial_size = \
             (input.spatial_size - self.filter_size) / self.filter_stride + 1
         s.forward_pass_multiplyAdd_count += \
-            dim_typed_fn(self.dimension, input, 'Convolution_updateOutput')(
+            dim_typed_fn(self.dimension, input.features, 'Convolution_updateOutput')(
                 input.spatial_size,
                 self.output.spatial_size,
                 self.filter_size,
@@ -55,7 +60,7 @@ class Convolution(SparseModule):
     def backward(self, input, gradOutput, scale=1):
         assert scale == 1
-        dim_typed_fn(self.dimension, input, 'Convolution_backward')(
+        dim_typed_fn(self.dimension, input.features, 'Convolution_backward')(
            input.spatial_size,
            self.output.spatial_size,
            self.filter_size,
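The factored-out std is a He-style initialisation: the fan-in of each output unit is nIn * filter_volume, and the weights are drawn from a normal distribution with std = sqrt(2 / fan_in). A small worked example of the same expression (the concrete numbers are illustrative only, not taken from the diff):

    dimension, filter_size, nIn = 3, 3, 16
    filter_volume = filter_size ** dimension        # a full 3x3x3 filter has 27 offsets
    std = (2.0 / nIn / filter_volume) ** 0.5        # same formula as in convolution.py
    print(round(std, 4))                            # 0.068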
PyTorch/sparseconvnet/legacy/deconvolution.py

@@ -22,24 +22,26 @@ class Deconvolution(SparseModule):
         self.filter_size = toLongTensor(dimension, filter_size)
         self.filter_stride = toLongTensor(dimension, filter_stride)
         self.filter_volume = self.filter_size.prod()
-        self.weight = torch.Tensor(nIn * self.filter_volume, nOut).normal_(0, (2.0 / nIn / self.filter_volume)**0.5)
-        self.gradWeight = torch.Tensor(nIn * self.filter_volume, nOut)
+        std = (2.0 / nIn / self.filter_volume)**0.5
+        self.weight = torch.Tensor(nIn * self.filter_volume, nOut).normal_(0, std)
+        self.gradWeight = torch.Tensor(nIn * self.filter_volume, nOut).fill_(std)
         if bias:
             self.bias = torch.Tensor(nOut).zero_()
-            self.gradBias = torch.Tensor(nOut)
+            self.gradBias = torch.Tensor(nOut).zero_()
         self.output = SparseConvNetTensor(torch.Tensor())
         self.gradInput = torch.Tensor()

     def updateOutput(self, input):
-        assert input.features.size(1) == self.nIn
+        assert input.features.ndimension() == 0 or input.features.size(1) == self.nIn
         self.output.metadata = input.metadata
         self.output.spatial_size = \
             (input.spatial_size - 1) * self.filter_stride + self.filter_size
         s.forward_pass_multiplyAdd_count += \
-            dim_typed_fn(self.dimension, input, 'Deconvolution_updateOutput')(
+            dim_typed_fn(self.dimension, input.features, 'Deconvolution_updateOutput')(
                 input.spatial_size,
                 self.output.spatial_size,
                 self.filter_size,
@@ -57,7 +59,7 @@ class Deconvolution(SparseModule):
     def backward(self, input, gradOutput, scale=1):
         assert scale == 1
-        dim_typed_fn(self.dimension, input, 'Deconvolution_backward')(
+        dim_typed_fn(self.dimension, input.features, 'Deconvolution_backward')(
            input.spatial_size,
            self.output.spatial_size,
            self.filter_size,
PyTorch/sparseconvnet/legacy/denseToSparse.py  (new file, mode 100644)

# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

"""
Function to convert a Dense Input into a sparse input.
If possible, avoid using this module; build the hidden layer using InputBatch.

Parameters:
dimension : of the input field
"""

import torch
from . import SparseModule
from ..utils import dim_fn, nullptr
from .sparseConvNetTensor import SparseConvNetTensor
from .metadata import Metadata


class DenseToSparse(SparseModule):
    def __init__(self, dimension):
        SparseModule.__init__(self)
        self.dimension = dimension
        self.output = SparseConvNetTensor(torch.Tensor(), Metadata(dimension))
        self.gradInput = torch.Tensor()

    def updateOutput(self, input):
        a = input
        aa = a.permute(*([0, ] + list(range(2, 2 + self.dimension)) + [1, ])).clone()
        self.aas = aa.size()
        nz = aa.abs().sum(self.dimension + 1).view(aa.size()[0:-1])
        s = torch.LongTensor(nz.stride()).view(1, self.dimension + 1)
        nz = nz.nonzero()
        s = s.type_as(nz)
        aa = aa.view(-1, a.size(1))
        self.aas2 = aa.size()
        self.r = (nz * s.expand_as(nz)).sum(1).view(-1)
        self.output.features = aa.index_select(0, self.r)
        self.output.spatial_size = torch.LongTensor(list(input.size()[2:]))
        dim_fn(self.dimension, 'createMetadataForDenseToSparse')(
            self.output.metadata.ffi,
            self.output.spatial_size,
            nz.cpu(),
            input.size(0))
        return self.output

    def updateGradInput(self, input, gradOutput):
        self.gradInput.resize_(self.aas2).zero_()
        self.gradInput.index_copy_(0, self.r, gradOutput)
        self.gradInput = self.gradInput.view(self.aas).permute(
            *([0, self.dimension + 1] + list(range(1, self.dimension + 1))))
        return self.gradInput

    def clearState(self):
        SparseModule.clearState(self)
        self.aas = None
        self.r = None

    def __repr__(self):
        return 'DenseToSparse(' + str(self.dimension) + ')'
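A minimal usage sketch for the new module (assumptions: sparseconvnet.legacy is importable as scn, and legacy modules expose the torch.legacy.nn-style forward()); it converts a batch of dense 2D images into a SparseConvNetTensor whose features hold one row per non-zero spatial site:

    import torch
    import sparseconvnet.legacy as scn

    dense = torch.zeros(2, 4, 8, 8)      # batch x nPlanes x height x width
    dense[0, :, 3, 5] = 1                # first sample: one active site
    dense[1, :, 0, 0] = 2                # second sample: one active site
    d2s = scn.DenseToSparse(2)           # 2 spatial dimensions
    sparse = d2s.forward(dense)          # SparseConvNetTensor
    print(sparse.features.size())        # expected: torch.Size([2, 4]), one row per active site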
PyTorch/sparseconvnet/legacy/leakyReLU.py

@@ -22,7 +22,7 @@ class LeakyReLU(SparseModule):
     def updateOutput(self, input):
         self.output.metadata = input.metadata
         self.output.spatial_size = input.spatial_size
-        typed_fn(input, 'LeakyReLU_updateOutput')(
+        typed_fn(input.features, 'LeakyReLU_updateOutput')(
             input.features,
             self.output.features,
             self.leakage)
PyTorch/sparseconvnet/legacy/maxPooling.py

@@ -26,7 +26,7 @@ class MaxPooling(SparseModule):
         self.output.metadata = input.metadata
         self.output.spatial_size = \
             (input.spatial_size - self.pool_size) / self.pool_stride + 1
-        dim_typed_fn(self.dimension, input, 'MaxPooling_updateOutput')(
+        dim_typed_fn(self.dimension, input.features, 'MaxPooling_updateOutput')(
             input.spatial_size,
             self.output.spatial_size,
             self.pool_size,
@@ -39,7 +39,7 @@ class MaxPooling(SparseModule):
         return self.output

     def updateGradInput(self, input, gradOutput):
-        dim_typed_fn(self.dimension, input, 'MaxPooling_updateGradInput')(
+        dim_typed_fn(self.dimension, input.features, 'MaxPooling_updateGradInput')(
             input.spatial_size,
             self.output.spatial_size,
             self.pool_size,
PyTorch/sparseconvnet/legacy/metadata.py

@@ -24,16 +24,16 @@ ffi = cffi.FFI()
 class Metadata(object):
     def __init__(self, dimension, ptr=0):
         #print('make meta',dimension, ptr)
         self.dimension = dimension
         self.ffi = ffi.new('void *[1]')
         scn_writePtr(ptr, self.ffi)
         self.ffigc = ffi.gc(self.ffi, dim_fn(self.dimension, 'freeMetadata'))

     def set_(self):
-        if hasattr(self, 'ffi'):
-            del self.ffigc
-            del self.ffi
+        dim_fn(self.dimension, 'freeMetadata')(self.ffi)
+        # if hasattr(self, 'ffi'):
+        #     del self.ffigc
+        #     del self.ffi

     def __reduce__(self):
         if hasattr(self, 'ffi'):
PyTorch/sparseconvnet/legacy/misc.py  (new file, mode 100644)

# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch.legacy.nn as nn
from .sequential import Sequential
from .sparseModule import SparseModule
from .sparseConvNetTensor import SparseConvNetTensor
from .batchNormalization import BatchNormalization


class Tanh(SparseModule):
    def __init__(self):
        SparseModule.__init__(self)
        self.module = nn.Tanh()
        self.output = SparseConvNetTensor()
        self.output.features = self.module.output
        self.gradInput = self.module.gradInput

    def updateOutput(self, input):
        self.output.metadata = input.metadata
        self.output.spatial_size = input.spatial_size
        self.module.forward(input.features)
        return self.output

    def updateGradInput(self, input, gradOutput):
        self.module.updateGradInput(input.features, gradOutput)
        return self.gradInput

    def type(self, t, tensorCache=None):
        if t:
            self.module.type(t, tensorCache)
            self.output.features = self.module.output
            self.gradInput = self.module.gradInput


class ELU(SparseModule):
    def __init__(self):
        SparseModule.__init__(self)
        self.module = nn.ELU()
        self.output = SparseConvNetTensor()
        self.gradInput = self.module.gradInput

    def updateOutput(self, input):
        self.output.metadata = input.metadata
        self.output.spatial_size = input.spatial_size
        self.module.forward(input.features)
        return self.output

    def updateGradInput(self, input, gradOutput):
        self.module.updateGradInput(input.features, gradOutput)
        return self.gradInput

    def type(self, t, tensorCache=None):
        if t:
            self.module.type(t, tensorCache)
            self.output.features = self.module.output
            self.gradInput = self.module.gradInput


def BatchNormELU(nPlanes, eps=1e-4, momentum=0.9):
    return Sequential().add(
        BatchNormalization(nPlanes, eps, momentum)).add(ELU())
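A hedged sketch of how the new wrappers might be wired into a legacy network. Sequential and ValidConvolution come from the rest of the legacy package; their exact signatures and their availability on the scn namespace are assumptions based on how they are used elsewhere in this diff:

    import sparseconvnet.legacy as scn

    m = scn.Sequential()
    m.add(scn.ValidConvolution(2, 3, 16, 3, False))   # dimension, nIn, nOut, filter_size, bias (assumed order)
    m.add(scn.BatchNormELU(16))                       # BatchNormalization followed by ELU, from misc.py
    m.add(scn.Tanh())                                 # elementwise Tanh applied to the feature matrix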
PyTorch/sparseconvnet/legacy/networkArchitectures.py

@@ -116,7 +116,7 @@ def SparseVggNet(dimension, nInputPlanes, layers):
                 .add(ValidConvolution(dimension, x[3], x[3], 3, False))
                 .add(BatchNormReLU(x[3]))
                 .add(Deconvolution(dimension, x[3], x[3], 3, 2, False))
-            )).add(JoinTable({x[1], x[2], x[3]}))
+            )).add(JoinTable([x[1], x[2], x[3]]))
             nPlanes = x[1] + x[2] + x[3]
             m.add(BatchNormReLU(nPlanes))
     return m
PyTorch/sparseconvnet/legacy/networkInNetwork.py

@@ -16,11 +16,12 @@ class NetworkInNetwork(SparseModule):
         SparseModule.__init__(self)
         self.nIn = nIn
         self.nOut = nOut
-        self.weight = torch.Tensor(nIn, nOut).normal_(0, (2.0 / self.nIn)**0.5)
-        self.gradWeight = torch.Tensor(nIn, nOut)
+        std = (2.0 / self.nIn)**0.5
+        self.weight = torch.Tensor(nIn, nOut).normal_(0, std)
+        self.gradWeight = torch.Tensor(nIn, nOut).fill_(std)
         if bias:
-            self.bias = torch.Tensor(nOut).fill_(0)
-            self.gradBias = torch.Tensor(nOut)
+            self.bias = torch.Tensor(nOut).zero_()
+            self.gradBias = torch.Tensor(nOut).zero_()
         self.output = SparseConvNetTensor(torch.Tensor())
         self.gradInput = torch.Tensor()
@@ -28,7 +29,7 @@ class NetworkInNetwork(SparseModule):
         self.output.metadata = input.metadata
         self.output.spatial_size = input.spatial_size
         s.forward_pass_multiplyAdd_count += \
-            typed_fn(input, 'NetworkInNetwork_updateOutput')(
+            typed_fn(input.features, 'NetworkInNetwork_updateOutput')(
                 input.features,
                 self.output.features,
                 self.weight,
@@ -37,7 +38,7 @@ class NetworkInNetwork(SparseModule):
         return self.output

     def updateGradInput(self, input, gradOutput):
-        typed_fn(input, 'NetworkInNetwork_updateGradInput')(
+        typed_fn(input.features, 'NetworkInNetwork_updateGradInput')(
             self.gradInput,
             gradOutput,
             self.weight)
@@ -45,7 +46,7 @@ class NetworkInNetwork(SparseModule):
     def accGradParameters(self, input, gradOutput, scale=1):
         assert scale == 1
-        typed_fn(input, 'NetworkInNetwork_accGradParameters')(
+        typed_fn(input.features, 'NetworkInNetwork_accGradParameters')(
             input.features,
             gradOutput,
             self.gradWeight,
PyTorch/sparseconvnet/legacy/sparseConvNetTensor.py

@@ -29,7 +29,7 @@ class SparseConvNetTensor(object):
     def set_(self):
         self.features.set_(self.features.storage_type()())
-        self.metadata = None
+        self.metadata.set_()
         self.spatialSize = None

     def __repr__(self):
PyTorch/sparseconvnet/legacy/sparseToDense.py

@@ -22,23 +22,27 @@ from .sparseConvNetTensor import SparseConvNetTensor
 class SparseToDense(SparseModule):
-    def __init__(self, dimension):
+    def __init__(self, dimension, nPlanes=None):
         SparseModule.__init__(self)
         self.dimension = dimension
         self.output = torch.Tensor()
         self.gradInput = torch.FloatTensor()
+        self.nPlanes = nPlanes

     def updateOutput(self, input):
-        dim_typed_fn(self.dimension, input, 'SparseToDense_updateOutput')(
+        if not self.nPlanes:
+            self.nPlanes = input.features.size(1)
+        dim_typed_fn(self.dimension, input.features, 'SparseToDense_updateOutput')(
             input.spatial_size,
             input.metadata.ffi,
             input.features,
             self.output,
-            torch.cuda.IntTensor() if input.features.is_cuda else nullptr)
+            torch.cuda.IntTensor() if input.features.is_cuda else nullptr,
+            self.nPlanes)
         return self.output

     def updateGradInput(self, input, gradOutput):
-        dim_typed_fn(self.dimension, input, 'SparseToDense_updateGradInput')(
+        dim_typed_fn(self.dimension, input.features, 'SparseToDense_updateGradInput')(
             input.spatial_size,
             input.metadata.ffi,
             input.features,
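The new optional nPlanes argument is forwarded to the C function as an explicit output-plane count; when it is not given, the layer falls back to input.features.size(1) on the first forward pass. Presumably this keeps the dense output's channel dimension well defined even when the feature matrix is empty. A short usage sketch (only the constructor arguments come from the diff; the rest is assumed):

    import sparseconvnet.legacy as scn

    s2d = scn.SparseToDense(2, 32)   # always emit 32 planes, even for an input with no active sites
    # s2d = scn.SparseToDense(2)     # previous behaviour: infer the plane count from the input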
PyTorch/sparseconvnet/legacy/validConvolution.py

@@ -19,22 +19,24 @@ class ValidConvolution(SparseModule):
         self.nOut = nOut
         self.filter_size = toLongTensor(dimension, filter_size)
         self.filter_volume = self.filter_size.prod()
-        self.weight = torch.Tensor(nIn * self.filter_volume, nOut).normal_(0, (2.0 / nIn / self.filter_volume)**0.5)
-        self.gradWeight = torch.Tensor(nIn * self.filter_volume, nOut)
+        std = (2.0 / nIn / self.filter_volume)**0.5
+        self.weight = torch.Tensor(nIn * self.filter_volume, nOut).normal_(0, std)
+        self.gradWeight = torch.Tensor(nIn * self.filter_volume, nOut).fill_(std)
         if bias:
             self.bias = torch.Tensor(nOut).zero_()
-            self.gradBias = torch.Tensor(nOut)
+            self.gradBias = torch.Tensor(nOut).zero_()
         self.output = SparseConvNetTensor(torch.Tensor())
         self.gradInput = torch.Tensor()

     def updateOutput(self, input):
-        assert input.features.size(1) == self.nIn
+        assert input.features.ndimension() == 0 or input.features.size(1) == self.nIn
         self.output.metadata = input.metadata
         self.output.spatial_size = input.spatial_size
         s.forward_pass_multiplyAdd_count += \
-            dim_typed_fn(self.dimension, input, 'ValidConvolution_updateOutput')(
+            dim_typed_fn(self.dimension, input.features, 'ValidConvolution_updateOutput')(
                 input.spatial_size,
                 self.filter_size,
                 input.metadata.ffi,
@@ -49,7 +51,7 @@ class ValidConvolution(SparseModule):
     def backward(self, input, gradOutput, scale=1):
         assert scale == 1
-        dim_typed_fn(self.dimension, input, 'ValidConvolution_backward')(
+        dim_typed_fn(self.dimension, input.features, 'ValidConvolution_backward')(
            input.spatial_size,
            self.filter_size,
            input.metadata.ffi,
PyTorch/sparseconvnet/utils.py

@@ -30,18 +30,16 @@ def dim_fn(dimension, name):
 def typed_fn(t, name):
-    # print('typed_fn',dimension,name)
-    return getattr(scn, 'scn_' + typeTable[t.features.type()] + '_' + name)
+    # print('typed_fn',t.features.type(),name)
+    return getattr(scn, 'scn_' + typeTable[t.type()] + '_' + name)

 def dim_typed_fn(dimension, t, name):
-    # print('dim_typed_fn',dimension,t,name)
+    # print('dim_typed_fn',dimension,t.features.type(),name)
     return getattr(scn, 'scn_' +
-                   typeTable[t.features.type()] +
+                   typeTable[t.type()] +
                    str(dimension) + name)

 ffi = FFI()
 nullptr = ffi.NULL
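Since typeTable is now indexed with t.type() instead of t.features.type(), both helpers expect a plain tensor, which is why every call site in this commit switches from passing the SparseConvNetTensor to passing input.features. A toy illustration of the dispatch pattern (the real typeTable contents and the compiled scn extension are not reproduced here; the 'cpu_float' entry and the fake module are assumptions):

    import torch

    typeTable = {'torch.FloatTensor': 'cpu_float'}            # assumed mapping

    class FakeSCN(object):                                    # stand-in for the compiled scn module
        def scn_cpu_float_LeakyReLU_updateOutput(self, *args):
            print('dispatched with', len(args), 'arguments')

    scn = FakeSCN()

    def typed_fn(t, name):                                    # mirrors utils.py after this change
        return getattr(scn, 'scn_' + typeTable[t.type()] + '_' + name)

    features = torch.FloatTensor(5, 3)
    typed_fn(features, 'LeakyReLU_updateOutput')(features, features, 0.1)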
Torch/C.lua

@@ -52,12 +52,16 @@ void scn_DIMENSION_setInputSpatialLocations(void **m, THFloatTensor *features,
   THLongTensor *locations, THFloatTensor *vecs, bool overwrite);
 void scn_DIMENSION_getSpatialLocations(void **m, THLongTensor *spatialSize,
   THLongTensor *locations);
 ]]
 for DIMENSION = 1, 10 do
   local def = string.gsub(cdef, 'DIMENSION', DIMENSION)
-  if fc then fc:write(def) end
   ffi.cdef(def)
+  if fc then
+    def = string.gsub(def, 'bool', '_Bool')
+    fc:write(def)
+  end
 end
 --types CPU float, double;
@@ -128,21 +132,29 @@ void scn_ARCH_REAL_NetworkInNetwork_accGradParameters(
   def = string.gsub(def, 'THITensor', 'void')
   def = string.gsub(def, 'REAL', v[1])
   def = string.gsub(def, 'THTensor', v[2])
-  if fc then fc:write(def) end
   ffi.cdef(def)
+  if fc then
+    def = string.gsub(def, 'bool', '_Bool')
+    fc:write(def)
+  end
 end
 if sparseconvnet.cutorch then
-  for k, v in ipairs({{'float', 'THCudaTensor'}, {'double', 'THCudaDoubleTensor'}}) do
+  for k, v in ipairs({{'float', 'THCudaTensor'},
+    --{'double', 'THCudaDoubleTensor'}
+    }) do
     local def = cdef
     def = string.gsub(def, 'ARCH', 'gpu')
     def = string.gsub(def, 'THITensor', sparseconvnet.ruleBookBits == 64 and
      'THCudaLongTensor' or 'THCudaIntTensor')
     def = string.gsub(def, 'REAL', v[1])
     def = string.gsub(def, 'THTensor', v[2])
-    if fg then fg:write(def) end
     ffi.cdef(def)
+    if fg then
+      def = string.gsub(def, 'bool', '_Bool')
+      fg:write(def)
+    end
   end
 end
@@ -226,7 +238,7 @@ void scn_ARCH_REAL_DIMENSIONValidConvolution_backward(
   THTensor *d_bias, long filterVolume, THITensor *rulesBuffer);
 ]]
 for _, v in ipairs({{'float', 'THFloatTensor'}, {'double', 'THDoubleTensor'}}) do
 for DIMENSION = 1, 10 do
   local def = cdef
   def = string.gsub(def, 'ARCH', 'cpu')
@@ -234,14 +246,18 @@ void scn_ARCH_REAL_DIMENSIONValidConvolution_backward(
   def = string.gsub(def, 'THITensor', 'void')
   def = string.gsub(def, 'REAL', v[1])
   def = string.gsub(def, 'THTensor', v[2])
-  if fc then fc:write(def) end
   ffi.cdef(def)
+  if fc then
+    def = string.gsub(def, 'bool', '_Bool')
+    fc:write(def)
+  end
 end
 end
 if sparseconvnet.cutorch then
-  for k, v in ipairs({{'float', 'THCudaTensor'}, {'double', 'THCudaDoubleTensor'}}) do
+  for k, v in ipairs({{'float', 'THCudaTensor'},
+    --{'double', 'THCudaDoubleTensor'}
+    }) do
   for DIMENSION = 1, 10 do
     local def = cdef
     def = string.gsub(def, 'ARCH', 'gpu')
@@ -250,8 +266,11 @@ void scn_ARCH_REAL_DIMENSIONValidConvolution_backward(
      'THCudaLongTensor' or 'THCudaIntTensor')
     def = string.gsub(def, 'REAL', v[1])
     def = string.gsub(def, 'THTensor', v[2])
-    if fg then fg:write(def) end
     ffi.cdef(def)
+    if fg then
+      def = string.gsub(def, 'bool', '_Bool')
+      fg:write(def)
+    end
   end
 end
 end
Torch/SparseToDense.lua

@@ -5,14 +5,14 @@
 -- LICENSE file in the root directory of this source tree.
 --[[
-Function to convert a SparseConvNet hidden layer to a dense convolutional
-layer. Put a SparseToDense convolutional layer (or an ActivePooling layer) at
-the top of your sparse network. The output can then pass to a dense
-convolutional layers or (if the spatial dimensions have become trivial) a
-linear classifier.
-Parameters:
-dimension : of the input field
+Function to convert a SparseConvNet hidden layer to a dense convolutional
+layer. Put a SparseToDense convolutional layer (or an ActivePooling layer) at
+the top of your sparse network. The output can then pass to a dense
+convolutional layers or (if the spatial dimensions have become trivial) a
+linear classifier.
+Parameters:
+dimension : of the input field
 ]]
@@ -20,11 +20,12 @@ return function(sparseconvnet)
   local SparseToDense, parent = torch.class(
     'sparseconvnet.SparseToDense', 'nn.Module', sparseconvnet)
-  function SparseToDense:__init(dimension)
+  function SparseToDense:__init(dimension, nPlanes)
     parent.__init(self)
     self.dimension = dimension
     self.output = torch.Tensor()
    self.gradInput = {features = torch.Tensor()}
+    self.nPlanes = nPlanes
   end
   function SparseToDense:updateOutput(input)
@@ -34,7 +35,8 @@ return function(sparseconvnet)
       input.metadata.ffi,
       input.features:cdata(),
       self.output:cdata(),
-      self.shared.rulesBuffer and self.shared.rulesBuffer:cdata())
+      self.shared.rulesBuffer and self.shared.rulesBuffer:cdata(),
+      self.nPlanes or input.features:size(2))
     return self.output
   end