OpenDAS / SparseConvNet
"git@developer.sourcefind.cn:jerrrrry/infinicore.git" did not exist on "70862bcc9803c1cc1cab16ac0c1a142ce980a669"
Commit d8c3aff1 authored Sep 13, 2017 by Benjamin Thomas Graham
tidy
Parent 5f0860fc
Showing 4 changed files with 62 additions and 29 deletions.
PyTorch/sparseconvnet/legacy/__init__.py            +3  -2
PyTorch/sparseconvnet/legacy/batchNormalization.py  +0  -2
PyTorch/sparseconvnet/legacy/leakyReLU.py           +0  -25
PyTorch/sparseconvnet/legacy/misc.py                +59 -0
PyTorch/sparseconvnet/legacy/__init__.py
@@ -10,7 +10,7 @@ from .inputBatch import InputBatch
 from .sparseConvNetTensor import SparseConvNetTensor
 from .sparseModule import SparseModule
 from .averagePooling import AveragePooling
-from .batchNormalization import BatchNormReLU, BatchNormLeakyReLU, BatchNormalizationInTensor
+from .batchNormalization import BatchNormalization, BatchNormReLU, BatchNormLeakyReLU, BatchNormalizationInTensor
 from .concatTable import ConcatTable
 from .convolution import Convolution
 from .cAddTable import CAddTable
@@ -18,7 +18,7 @@ from .deconvolution import Deconvolution
 from .denseToSparse import DenseToSparse
 from .identity import Identity
 from .joinTable import JoinTable
-from .leakyReLU import LeakyReLU, Tanh
+from .leakyReLU import LeakyReLU
 from .maxPooling import MaxPooling
 from .networkInNetwork import NetworkInNetwork
 from .reLU import ReLU
@@ -27,3 +27,4 @@ from .sparseToDense import SparseToDense
 from .validConvolution import ValidConvolution
 from .networkArchitectures import *
 from .classificationTrainValidate import ClassificationTrainValidate
+from .misc import *
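For context on what this changes downstream (not part of the diff itself): after the commit, BatchNormalization is re-exported by the legacy package, Tanh no longer comes from leakyReLU.py, and the names defined in the new misc.py are pulled in by the star import. A rough sketch of the resulting import surface, assuming the package is importable as sparseconvnet.legacy (the install path and the 64-plane width are assumptions, not shown in the diff):

    # Rough sketch of the legacy package's surface after this commit; the
    # sparseconvnet.legacy import path and the 64-plane width are assumptions.
    import sparseconvnet.legacy as scn

    bn = scn.BatchNormalization(64)   # now re-exported via .batchNormalization
    elu = scn.ELU()                   # picked up through "from .misc import *"
    tanh = scn.Tanh()                 # now the misc.py wrapper, not the removed leakyReLU.py class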
PyTorch/sparseconvnet/legacy/batchNormalization.py
@@ -21,7 +21,6 @@ from . import SparseModule
 from ..utils import toLongTensor, typed_fn, optionalTensor, nullptr
 from .sparseConvNetTensor import SparseConvNetTensor
-
 class BatchNormalization(SparseModule):
     def __init__(self,
@@ -122,7 +121,6 @@ class BatchNormLeakyReLU(BatchNormalization):
             ',momentum=' + str(self.momentum) +
             ',affine=' + str(self.affine) + ')'
         return s
-
 class BatchNormalizationInTensor(BatchNormalization):
     def __init__(self,
PyTorch/sparseconvnet/legacy/leakyReLU.py
@@ -42,28 +42,3 @@ class LeakyReLU(SparseModule):
         if t:
             self.output.type(t)
             self.gradInput = self.gradInput.type(t)
-
-class Tanh(SparseModule):
-    def __init__(self):
-        SparseModule.__init__(self)
-        self.output = SparseConvNetTensor(torch.Tensor())
-        #self.gradInput = None if ip else torch.Tensor()
-        self.gradInput = torch.Tensor()
-    def updateOutput(self, input):
-        self.output.metadata = input.metadata
-        self.output.spatial_size = input.spatial_size
-        self.output.features = torch.tanh(input.features)
-        return self.output
-    def updateGradInput(self, input, gradOutput):
-        self.gradInput.resize_as_(gradOutput).copy_(gradOutput)
-        self.gradInput.mul(1 + self.output.features)
-        self.gradInput.mul(1 - self.output.features)
-        return self.gradInput
-    def type(self, t, tensorCache=None):
-        if t:
-            self.output.type(t)
-            self.gradInput = self.gradInput.type(t)
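The backward of the removed class multiplies the incoming gradient by (1 + tanh(x)) and (1 - tanh(x)), i.e. by 1 - tanh(x)^2, which is the derivative of tanh. A small sketch checking that identity with modern PyTorch autograd (illustration only, not code from this commit); note also that Tensor.mul is out-of-place, so the removed updateGradInput appears to discard those products rather than scale gradInput in place, which may be part of why it was replaced by the torch.legacy.nn wrapper in misc.py:

    # Illustration only: verify d/dx tanh(x) = (1 + tanh x)(1 - tanh x) = 1 - tanh(x)^2
    # using current PyTorch autograd (the removed code targeted the legacy nn API).
    import torch

    x = torch.randn(5, requires_grad=True)
    y = torch.tanh(x)
    y.backward(torch.ones_like(x))                 # seed gradient of ones
    manual = (1 + y) * (1 - y)                     # the two factors the old backward multiplied in
    assert torch.allclose(x.grad, manual.detach())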
PyTorch/sparseconvnet/legacy/misc.py
0 → 100644 (new file)
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

import torch.legacy.nn as nn
from .sequential import Sequential
from .sparseModule import SparseModule

class Tanh(SparseModule):
    def __init__(self):
        SparseModule.__init__(self)
        self.module = nn.Tanh()
        self.output = SparseConvNetTensor()
        self.output.features = self.module.output
        self.gradInput = self.module.gradInput
    def updateOutput(self, input):
        self.output.metadata = input.metadata
        self.output.spatial_size = input.spatial_size
        self.module.forward(input.features)
        return self.output
    def updateGradInput(self, input, gradOutput):
        self.module.updateGradInput(input.features, gradOutput)
        return self.gradInput
    def type(self, t, tensorCache=None):
        if t:
            self.module.type(t, tensorCache)
            self.output.features = self.module.output
            self.gradInput = self.module.gradInput

class ELU(SparseModule):
    def __init__(self):
        SparseModule.__init__(self)
        self.module = nn.ELU()
        self.output = SparseConvNetTensor()
        self.gradInput = self.module.gradInput
    def updateOutput(self, input):
        self.output.metadata = input.metadata
        self.output.spatial_size = input.spatial_size
        self.module.forward(input.features)
        return self.output
    def updateGradInput(self, input, gradOutput):
        self.module.updateGradInput(input.features, gradOutput)
        return self.gradInput
    def type(self, t, tensorCache=None):
        if t:
            self.module.type(t, tensorCache)
            self.output.features = self.module.output
            self.gradInput = self.module.gradInput

def BatchNormELU(nPlanes, eps=1e-4, momentum=0.9):
    return Sequential().add(BatchNormalization(nPlanes, eps, momentum)).add(ELU())
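As a usage sketch (not part of the commit), the new wrappers compose like the other legacy modules; the 64-plane width and the sparseconvnet.legacy import path are assumptions, and misc.py as committed still expects SparseConvNetTensor and BatchNormalization to be resolvable in its namespace:

    # Hypothetical composition using the modules this commit adds; only the
    # module names come from the diff, sizes and import path are assumed.
    import sparseconvnet.legacy as scn

    block = scn.Sequential()
    block.add(scn.BatchNormELU(64))   # BatchNormalization(64, eps=1e-4, momentum=0.9) then ELU
    block.add(scn.Tanh())             # misc.py Tanh, delegating to torch.legacy.nn.Tanh
    # output = block.forward(sparse_input)  # sparse_input: a SparseConvNetTensor, e.g. from InputBatch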