OpenDAS / Pytorch-Encoding · Commits

Commit 2d21747a, authored Jun 04, 2018 by Zhang
v0.4.2
Parent: 7e19143c

Showing 2 changed files with 97 additions and 87 deletions:
  setup.py (+39 −26)
  tests/unit_test/test_module.py (+58 −61)
setup.py @ 2d21747a

@@ -18,22 +18,7 @@ import setuptools.command.install
 cwd = os.path.dirname(os.path.abspath(__file__))

-# run test scrip after installation
-class install(setuptools.command.install.install):
-    def run(self):
-        self.create_version_file()
-        setuptools.command.install.install.run(self)
-        #subprocess.check_call("python tests/unit_test.py".split())
-
-    @staticmethod
-    def create_version_file():
-        global version, cwd
-        print('-- Building version ' + version)
-        version_path = os.path.join(cwd, 'encoding', 'version.py')
-        with open(version_path, 'w') as f:
-            f.write('"""This is encoding version file."""\n')
-            f.write("__version__ = '{}'\n".format(version))
-
-version = '0.4.0'
+version = '0.4.2'
 try:
     sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'],
                                   cwd=cwd).decode('ascii').strip()
@@ -41,6 +26,27 @@ try:
 except Exception:
     pass

+def create_version_file():
+    global version, cwd
+    print('-- Building version ' + version)
+    version_path = os.path.join(cwd, 'encoding', 'version.py')
+    with open(version_path, 'w') as f:
+        f.write('"""This is encoding version file."""\n')
+        f.write("__version__ = '{}'\n".format(version))
+
+# run test scrip after installation
+class install(setuptools.command.install.install):
+    def run(self):
+        create_version_file()
+        setuptools.command.install.install.run(self)
+        #subprocess.check_call("python tests/unit_test.py".split())
+
+class develop(setuptools.command.develop.develop):
+    def run(self):
+        create_version_file()
+        setuptools.command.develop.develop.run(self)
+        #subprocess.check_call("python tests/unit_test.py".split())
+
 try:
     import pypandoc
     readme = pypandoc.convert('README.md', 'rst')
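In the new layout, create_version_file is a module-level helper shared by both commands rather than a static method on install, so editable installs ("pip install -e .", which go through the develop command) also regenerate encoding/version.py. A minimal self-contained sketch of this setuptools pattern, assuming a hypothetical mypkg/ package directory next to setup.py:

import os
import setuptools.command.develop
import setuptools.command.install
from setuptools import setup

version = '0.1.0'  # single source of truth for the build
cwd = os.path.dirname(os.path.abspath(__file__))

def create_version_file():
    # write mypkg/version.py so the installed package can report __version__
    with open(os.path.join(cwd, 'mypkg', 'version.py'), 'w') as f:
        f.write("__version__ = '{}'\n".format(version))

class install(setuptools.command.install.install):
    def run(self):
        create_version_file()  # runs on `python setup.py install`
        setuptools.command.install.install.run(self)

class develop(setuptools.command.develop.develop):
    def run(self):
        create_version_file()  # runs on `pip install -e .`
        setuptools.command.develop.develop.run(self)

setup(name='mypkg',
      version=version,
      packages=['mypkg'],
      cmdclass={'install': install, 'develop': develop})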
@@ -51,10 +57,20 @@ requirements = [
     'numpy',
     'tqdm',
     'nose',
-    'torch>=0.3.1',
+    'torch>=0.5.0',
     'cffi>=1.0.0',
 ]

+requirements = [
+    'numpy',
+    'tqdm',
+    'nose',
+    'torch>=0.4.0',
+    'Pillow',
+    'scipy',
+    'requests',
+]
+
 setup(
     name="encoding",
     version=version,

@@ -67,17 +83,14 @@ setup(
     install_requires=requirements,
     packages=find_packages(exclude=["tests", "experiments"]),
     package_data={'encoding': [
-        'lib/*.so*',
-        'lib/*.dylib*',
-        '_ext/encoding_lib/*.so',
-        '_ext/encoding_lib/*.dylib',
-        'kernel/*.h',
-        'kernel/generic/*h',
-        'src/*.h',
+        'lib/cpu/*.h',
+        'lib/cpu/*.cpp',
+        'lib/gpu/*.h',
+        'lib/gpu/*.cpp',
+        'lib/gpu/*.cu',
     ]},
-    ext_package="",
-    # Extensions to compile.
-    cffi_modules=[os.path.join(cwd, "build.py:ffi")],
     cmdclass={
         'install': install,
+        'develop': develop,
     },
 )
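With cffi_modules and the prebuilt _ext/lib binaries dropped from package_data, the package now ships C++/CUDA sources (lib/cpu/*.cpp, lib/gpu/*.cu) instead of compiled artifacts, presumably building them at import time rather than through the old cffi step. For reference, after either custom command runs, the generated encoding/version.py holds exactly the two lines create_version_file writes:

"""This is encoding version file."""
__version__ = '0.4.2'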
tests/unit_test.py → tests/unit_test/test_module.py @ 2d21747a

@@ -15,12 +15,12 @@ from torch.autograd import Variable, gradcheck
 import encoding

-EPS = 1e-6
-ATOL = 1e-4
+EPS = 1e-3
+ATOL = 1e-3

 def _assert_tensor_close(a, b, atol=ATOL, rtol=EPS):
     npa, npb = a.cpu().numpy(), b.cpu().numpy()
-    assert np.allclose(npa, npb, atol=atol), \
+    assert np.allclose(npa, npb, rtol=rtol, atol=atol), \
         'Tensor close check failed\n{}\n{}\nadiff={}, rdiff={}'.format(
             a, b, np.abs(npa - npb).max(), np.abs((npa - npb) / np.fmax(npa, 1e-5)).max())
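The rewritten assertion forwards rtol as well as atol; previously the rtol=EPS default parameter was accepted but never used. np.allclose(a, b, rtol, atol) passes when |a - b| <= atol + rtol * |b| elementwise, so the relative term scales the tolerance with the magnitude of the reference tensor. A quick illustration:

import numpy as np

a = np.array([1000.0, 1e-7])
b = np.array([1000.5, 2e-7])

# atol alone rejects the pair: |1000.0 - 1000.5| = 0.5 > 1e-3
print(np.allclose(a, b, rtol=0, atol=1e-3))     # False

# rtol tolerates errors proportional to |b|: 0.5 <= 1e-3 + 1e-3 * 1000.5
print(np.allclose(a, b, rtol=1e-3, atol=1e-3))  # True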
@@ -79,95 +79,92 @@ def test_all_reduce():
     for i in range(1, ngpu):
         _assert_tensor_close(Y[i].data, Y[0].data)
     input = (1, *X)
-    #test = gradcheck(encoding.parallel.allreduce, input, eps=EPS, atol=ATOL)
     test = gradcheck(encoding.parallel.allreduce, input, eps=EPS, atol=ATOL)
     print('Testing allreduce(): {}'.format(test))

-def _test_syncbn(train_mode=True):
+def test_syncbn():
+    train_mode = True
     # generate input
     B, C, H, W = 8, 3, 4, 5
     X = Variable(torch.cuda.DoubleTensor(B, C, H, W).uniform_(-0.5, 0.5),
                  requires_grad=True)
     input = (X,)
     # SyncBN using DataParallel
-    layer = encoding.nn.SyncBatchNorm2d(C)
+    layer = encoding.nn.BatchNorm2d(C)
     model = torch.nn.DataParallel(layer).double().cuda()
+    encoding.parallel.patch_replication_callback(model)
     layer.train(train_mode)
     # grad check
     test = gradcheck(model, input, eps=EPS, atol=ATOL)
-    print('Testing SyncBatchNorm2d(): {}'.format(test))
+    print('Testing BatchNorm2d(): {}'.format(test))
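Both tests rely on torch.autograd.gradcheck, which perturbs each float64 input by eps, builds a numerical Jacobian from the finite differences, and compares it against the analytical Jacobian within atol. A standalone sketch with an arbitrary function (not from the repo):

import torch
from torch.autograd import gradcheck

# gradcheck needs double-precision inputs flagged with requires_grad=True
x = torch.randn(4, 3, dtype=torch.float64, requires_grad=True)

def func(t):
    return (t * t).sum(dim=1)  # any differentiable function works here

# returns True when the Jacobians agree; raises by default on a mismatch
print(gradcheck(func, (x,), eps=1e-3, atol=1e-3))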
-def _test_syncbn_func(train_mode=True):
+def test_syncbn_func():
     # generate input
     B, C, H = 2, 3, 4
     X = Variable(torch.cuda.DoubleTensor(B, C, H).uniform_(-0.5, 0.5),
                  requires_grad=True)
-    xsum = Variable(torch.ones(C).double().cuda(), requires_grad=True)
-    xsqsum = Variable(torch.ones(C).double().cuda(), requires_grad=True)
-    gamma = Variable(torch.ones(C).double().cuda(), requires_grad=True)
-    beta = Variable(torch.zeros(C).double().cuda(), requires_grad=True)
-    gamma.requires_grad = True
-    beta.requires_grad = True
-    runningVar = torch.ones(C).double().cuda()
-    runningMean = torch.zeros(C).double().cuda()
+    gamma = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
+    beta = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
+    mean = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
+    std = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
     N = B * H
-    inputs = (X, xsum, xsqsum, gamma, beta, runningMean, runningVar, N, 0.1, 1e-5, train_mode)
+    inputs = (X, mean, std, gamma, beta)
     # grad check
-    test = gradcheck(encoding.functions.batchnorm.apply, inputs, eps=EPS, atol=ATOL)
+    test = gradcheck(encoding.functions.batchnormtrain, inputs, eps=EPS, atol=ATOL)
     print('Testing batchnorm(): {}'.format(test))
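The new 5-tuple signature passes precomputed per-channel statistics straight in, so (assuming encoding.functions.batchnormtrain implements the standard affine normalization) the function under test reduces to y = gamma * (x - mean) / std + beta. A plain NumPy reference of that forward pass, with shapes matching the test (X is B x C x H, statistics are per-channel):

import numpy as np

B, C, H = 2, 3, 4
X = np.random.uniform(-0.5, 0.5, size=(B, C, H))
mean = np.random.uniform(-0.5, 0.5, size=C)
std = np.random.uniform(0.5, 1.0, size=C)  # kept positive in this sketch
gamma = np.random.uniform(-0.5, 0.5, size=C)
beta = np.random.uniform(-0.5, 0.5, size=C)

def batchnorm_train(X, mean, std, gamma, beta):
    # y = gamma * (x - mean) / std + beta, broadcast per channel (axis 1)
    s = (1, -1, 1)
    return gamma.reshape(s) * (X - mean.reshape(s)) / std.reshape(s) + beta.reshape(s)

print(batchnorm_train(X, mean, std, gamma, beta).shape)  # (2, 3, 4)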
-def _checkBatchNormResult(bn1, bn2, input, is_train, cuda=False):
-    def _find_bn(module):
-        for m in module.modules():
-            if isinstance(m, (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d,
-                              encoding.nn.SyncBatchNorm1d, encoding.nn.SyncBatchNorm2d)):
-                return m
-    def _syncParameters(bn1, bn2):
-        bn1.reset_parameters()
-        bn2.reset_parameters()
-        if bn1.affine and bn2.affine:
-            bn2.weight.data.copy_(bn1.weight.data)
-            bn2.bias.data.copy_(bn1.bias.data)
-        bn2.running_mean.copy_(bn1.running_mean)
-        bn2.running_var.copy_(bn1.running_var)
-
-    bn1.train(mode=is_train)
-    bn2.train(mode=is_train)
-
-    if cuda:
-        input = input.cuda()
-    # using the same values for gamma and beta
-    _syncParameters(_find_bn(bn1), _find_bn(bn2))
-
-    input1 = Variable(input.clone(), requires_grad=True)
-    input2 = Variable(input.clone(), requires_grad=True)
-    output1 = bn1(input1)
-    output2 = bn2(input2)
-    _assert_tensor_close(input1.data, input2.data)
-    _assert_tensor_close(output1.data, output2.data)
-    if not is_train:
-        return
-    (output1 ** 2).sum().backward()
-    (output2 ** 2).sum().backward()
-    _assert_tensor_close(input1.grad.data, input2.grad.data)
-    _assert_tensor_close(_find_bn(bn1).running_mean, _find_bn(bn2).running_mean)
-    _assert_tensor_close(_find_bn(bn1).running_var, _find_bn(bn2).running_var)
-
-def testSyncBN():
-    bn = torch.nn.BatchNorm2d(10).cuda().double()
-    sync_bn = encoding.nn.SyncBatchNorm2d(10).double()
-    sync_bn = torch.nn.DataParallel(sync_bn).cuda()
-    # check with unsync version
-    for i in range(10):
-        _checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16).double(), True, cuda=True)
-        _checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16).double(), False, cuda=True)
-    # gradcheck
-    _test_syncbn_func(True)
-    _test_syncbn(True)
+def testSyncBN():
+    def _checkBatchNormResult(bn1, bn2, input, is_train, cuda=False):
+        def _find_bn(module):
+            for m in module.modules():
+                if isinstance(m, (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d,
+                                  encoding.nn.BatchNorm1d, encoding.nn.BatchNorm2d)):
+                    return m
+        def _syncParameters(bn1, bn2):
+            bn1.reset_parameters()
+            bn2.reset_parameters()
+            if bn1.affine and bn2.affine:
+                bn2.weight.data.copy_(bn1.weight.data)
+                bn2.bias.data.copy_(bn1.bias.data)
+            bn2.running_mean.copy_(bn1.running_mean)
+            bn2.running_var.copy_(bn1.running_var)
+
+        bn1.train(mode=is_train)
+        bn2.train(mode=is_train)
+
+        if cuda:
+            input = input.cuda()
+        # using the same values for gamma and beta
+        _syncParameters(_find_bn(bn1), _find_bn(bn2))
+
+        input1 = Variable(input.clone().detach(), requires_grad=True)
+        input2 = Variable(input.clone().detach(), requires_grad=True)
+        output1 = bn1(input1)
+        output2 = bn2(input2)
+        _assert_tensor_close(input1.data, input2.data)
+        # assert forwarding
+        _assert_tensor_close(output1.data, output2.data)
+        if not is_train:
+            return
+        (output1 ** 2).sum().backward()
+        (output2 ** 2).sum().backward()
+        _assert_tensor_close(input1.grad.data, input2.grad.data)
+        _assert_tensor_close(_find_bn(bn1).running_mean, _find_bn(bn2).running_mean)
+        _assert_tensor_close(_find_bn(bn1).running_var, _find_bn(bn2).running_var)
+
+    bn = torch.nn.BatchNorm2d(10).cuda().double()
+    sync_bn = encoding.nn.BatchNorm2d(10).double()
+    sync_bn = torch.nn.DataParallel(sync_bn).cuda()
+    encoding.parallel.patch_replication_callback(sync_bn)
+    # check with unsync version
+    for i in range(10):
+        print(i)
+        _checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16).double(), True, cuda=True)
+        _checkBatchNormResult(bn, sync_bn, torch.rand(16, 10, 16, 16).double(), False, cuda=True)
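The final two assertions compare running statistics, which PyTorch BatchNorm layers update in training mode with an exponential moving average (momentum defaults to 0.1): running = (1 - momentum) * running + momentum * batch_stat. A small sketch of one such update on plain tensors:

import torch

momentum = 0.1
running_mean = torch.zeros(10, dtype=torch.float64)

x = torch.rand(16, 10, 16, 16, dtype=torch.float64)
# per-channel mean over the N, H, W dimensions
batch_mean = x.transpose(0, 1).contiguous().view(10, -1).mean(dim=1)

# the moving-average update applied on each training-mode forward pass
# (running_var is updated the same way, using the unbiased batch variance)
running_mean = (1 - momentum) * running_mean + momentum * batch_mean
print(running_mean)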
 if __name__ == '__main__':
     import nose
...