OpenDAS / Pytorch-Encoding · Commits

Commit 17be9e16 (unverified)
Authored Apr 19, 2020 by Hang Zhang; committed by GitHub on Apr 19, 2020
Parent: b872eb8c

fix miscs (#258)

Changes: 26 | Showing 6 changed files with 91 additions and 96 deletions (+91 -96) on this page:
experiments/segmentation/train_dist.py   +6   -13
setup.py                                 +23  -40
tests/unit_test/test_dataset.py          +32  -24
tests/unit_test/test_function.py         +0   -15
tests/unit_test/test_model.py            +24  -0
tests/unit_test/test_utils.py            +6   -4
experiments/segmentation/train_dist.py
@@ -11,7 +11,6 @@ import time
 import argparse
 import numpy as np
 from tqdm import tqdm
-#from mpi4py import MPI
 import torch
 from torch.utils import data
@@ -139,21 +138,15 @@ class Options():
         print(args)
         return args

-#def mpi_avg_all(*args):
-#    comm = MPI.COMM_WORLD
-#    # send to master
-#    sum_args = []
-#    for arg in args:
-#        sum_args.append(sum(comm.gather(arg, root=0)))
-#    sum_args = [item / len(args) for item in sum_args]
-#    return tuple(sum_args)
-
-def torch_dist_avg(*args):
+def torch_dist_avg(gpu, *args):
     process_group = torch.distributed.group.WORLD
     tensor_args = []
     pending_res = []
     for arg in args:
-        tensor_arg = torch.tensor(arg)
+        if isinstance(arg, torch.Tensor):
+            tensor_arg = arg.clone().reshape(1).detach().cuda(gpu)
+        else:
+            tensor_arg = torch.tensor(arg).reshape(1).cuda(gpu)
         tensor_args.append(tensor_arg)
         pending_res.append(torch.distributed.all_reduce(tensor_arg, group=process_group, async_op=True))
     for res in pending_res:
@@ -292,7 +285,7 @@ def main_worker(gpu, ngpus_per_node, args):
             print('pixAcc: %.3f, mIoU: %.3f' % (pixAcc, mIoU))
         if args.gpu == 0:
-            pixAcc, mIoU = torch_dist_avg(pixAcc, mIoU)
+            pixAcc, mIoU = torch_dist_avg(args.gpu, pixAcc, mIoU)
             print('pixAcc: %.3f, mIoU: %.3f' % (pixAcc, mIoU))

             new_pred = (pixAcc + mIoU) / 2
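For context on the signature change above, here is a minimal, self-contained sketch of the same all_reduce averaging pattern. It is not the repository's exact torch_dist_avg (whose body is truncated in this hunk); it assumes torch.distributed.init_process_group() has already been called on every rank, and it divides by the world size to return a mean:

    import torch
    import torch.distributed as dist

    def dist_avg(gpu, *values):
        # Average Python scalars or tensors over all ranks, mirroring the pattern above.
        world_size = dist.get_world_size()
        averaged = []
        for v in values:
            if isinstance(v, torch.Tensor):
                t = v.clone().reshape(1).detach().float().cuda(gpu)
            else:
                t = torch.tensor([float(v)]).cuda(gpu)
            dist.all_reduce(t)  # defaults to a sum across ranks; blocking call for simplicity
            averaged.append((t / world_size).item())
        return averaged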
setup.py
@@ -13,8 +13,6 @@ import os
 import subprocess
 from setuptools import setup, find_packages
-import setuptools.command.develop
-import setuptools.command.install

 cwd = os.path.dirname(os.path.abspath(__file__))
@@ -35,19 +33,6 @@ def create_version_file():
         f.write('"""This is encoding version file."""\n')
         f.write("__version__ = '{}'\n".format(version))

-# run test scrip after installation
-class install(setuptools.command.install.install):
-    def run(self):
-        create_version_file()
-        setuptools.command.install.install.run(self)
-
-class develop(setuptools.command.develop.develop):
-    def run(self):
-        create_version_file()
-        setuptools.command.develop.develop.run(self)
-
-readme = open('README.md').read()

 requirements = [
     'numpy',
     'tqdm',
@@ -60,28 +45,26 @@ requirements = [
     'requests',
 ]

-setup(
-    name="torch-encoding",
-    version=version,
-    author="Hang Zhang",
-    author_email="zhanghang0704@gmail.com",
-    url="https://github.com/zhanghang1989/PyTorch-Encoding",
-    description="PyTorch Encoding Package",
-    long_description=readme,
-    long_description_content_type='text/markdown',
-    license='MIT',
-    install_requires=requirements,
-    packages=find_packages(exclude=["tests", "experiments"]),
-    package_data={'encoding': [
-        'LICENSE',
-        'lib/cpu/*.h',
-        'lib/cpu/*.cpp',
-        'lib/gpu/*.h',
-        'lib/gpu/*.cpp',
-        'lib/gpu/*.cu',
-    ]},
-    cmdclass={
-        'install': install,
-        'develop': develop,
-    },
-)
+if __name__ == '__main__':
+    create_version_file()
+    setup(
+        name="torch-encoding",
+        version=version,
+        author="Hang Zhang",
+        author_email="zhanghang0704@gmail.com",
+        url="https://github.com/zhanghang1989/PyTorch-Encoding",
+        description="PyTorch Encoding Package",
+        long_description=open('README.md').read(),
+        long_description_content_type='text/markdown',
+        license='MIT',
+        install_requires=requirements,
+        packages=find_packages(exclude=["tests", "experiments"]),
+        package_data={'encoding': [
+            'LICENSE',
+            'lib/cpu/*.h',
+            'lib/cpu/*.cpp',
+            'lib/gpu/*.h',
+            'lib/gpu/*.cpp',
+            'lib/gpu/*.cu',
+        ]},
+    )
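With the custom install/develop command classes removed, create_version_file() now runs whenever setup.py is executed directly, thanks to the __main__ guard. Judging only from the two f.write() calls shown above, the generated version module would look roughly like the sketch below; the file path and the concrete version string are placeholders, not taken from this commit:

    # e.g. encoding/version.py (path assumed), as written by create_version_file()
    """This is encoding version file."""
    __version__ = '1.2.0'  # placeholder; the real value comes from the `version` variable in setup.py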
tests/unit_test/test_dataset.py
 import argparse
 ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 ## Created by: Hang Zhang
 ## ECE Department, Rutgers University
 ## Email: zhang.hang@rutgers.edu
 ## Copyright (c) 2017
 ##
 ## This source code is licensed under the MIT-style license found in the
 ## LICENSE file in the root directory of this source tree
 ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
 from tqdm import tqdm
 from torch.utils import data
 import torchvision.transforms as transform
 from encoding.datasets import get_segmentation_dataset

+def main():
+    parser = argparse.ArgumentParser(description='Test Dataset.')
+    parser.add_argument('--dataset', type=str, default='ade20k',
+                        help='dataset name (default: pascal12)')
+    args = parser.parse_args()

-def test_ade_dataset():
-    input_transform = transform.Compose([
-        transform.ToTensor(),
-        transform.Normalize([.485, .456, .406], [.229, .224, .225])])
-    trainset = get_segmentation_dataset(args.dataset, split='val', mode='train',
-                                        transform=input_transform)
-    trainloader = data.DataLoader(trainset, batch_size=16, drop_last=True, shuffle=True)
-    tbar = tqdm(trainloader)
-    max_label = -10
-    for i, (image, target) in enumerate(tbar):
-        tmax = target.max().item()
-        tmin = target.min().item()
-        assert(tmin >= -1)
-        if tmax > max_label:
-            max_label = tmax
-        assert(max_label < trainset.NUM_CLASS)
-        tbar.set_description("Batch %d, max label %d" % (i, max_label))
+    def test_dataset(dataset_name):
+        input_transform = transform.Compose([
+            transform.ToTensor(),
+            transform.Normalize([.485, .456, .406], [.229, .224, .225])])
+        trainset = get_segmentation_dataset(dataset_name, split='val', mode='train',
+                                            transform=input_transform)
+        trainloader = data.DataLoader(trainset, batch_size=16, drop_last=True, shuffle=True)
+        tbar = tqdm(trainloader)
+        max_label = -10
+        for i, (image, target) in enumerate(tbar):
+            tmax = target.max().item()
+            tmin = target.min().item()
+            assert(tmin >= -1)
+            if tmax > max_label:
+                max_label = tmax
+            assert(max_label < trainset.NUM_CLASS)
+            tbar.set_description("Batch %d, max label %d" % (i, max_label))
+
+    test_dataset('ade20k')

 if __name__ == "__main__":
+    main()
-    import nose
-    nose.runmodule()
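The refactored test still needs a downloaded segmentation dataset before the label-range checks can run. The loop logic itself can be exercised without ADE20K against a tiny fake dataset; the sketch below is illustrative only (FakeSegDataset is not part of the repository):

    import torch
    from torch.utils import data

    class FakeSegDataset(data.Dataset):
        NUM_CLASS = 3
        def __len__(self):
            return 8
        def __getitem__(self, idx):
            image = torch.rand(3, 32, 32)
            target = torch.randint(-1, self.NUM_CLASS, (32, 32))  # labels in [-1, NUM_CLASS)
            return image, target

    loader = data.DataLoader(FakeSegDataset(), batch_size=4, drop_last=True, shuffle=True)
    for image, target in loader:
        assert target.min().item() >= -1
        assert target.max().item() < FakeSegDataset.NUM_CLASS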
tests/unit_test/test_function.py
@@ -55,21 +55,6 @@ def test_moments():
     test = gradcheck(encoding.functions.moments, input, eps=EPS, atol=ATOL)
     print('Testing moments(): {}'.format(test))

-def test_syncbn_func():
-    # generate input
-    B, C, H = 2, 3, 4
-    X = Variable(torch.cuda.DoubleTensor(B, C, H).uniform_(-0.5, 0.5), requires_grad=True)
-    gamma = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
-    beta = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
-    mean = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
-    std = Variable(torch.cuda.DoubleTensor(C).uniform_(-0.5, 0.5), requires_grad=True)
-    N = B * H
-    inputs = (X, mean, std, gamma, beta)
-    # grad check
-    test = gradcheck(encoding.functions.batchnormtrain, inputs, eps=EPS, atol=ATOL)
-    print('Testing batchnorm(): {}'.format(test))

 def test_non_max_suppression():
     def _test_nms(cuda):
         # check a small test case
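The removed test wrapped its inputs in the long-deprecated Variable API and required a CUDA device. For reference, a minimal gradcheck in current PyTorch style looks like the sketch below; it uses a generic differentiable function rather than the removed encoding.functions.batchnormtrain, and the eps/atol values are stand-ins for the EPS/ATOL constants defined elsewhere in this file:

    import torch
    from torch.autograd import gradcheck

    # gradcheck wants double-precision inputs with requires_grad=True.
    x = torch.randn(2, 3, 4, dtype=torch.double, requires_grad=True)
    assert gradcheck(torch.sigmoid, (x,), eps=1e-6, atol=1e-4)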
tests/unit_test/test_model.py (new file, 0 → 100644)
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## Email: zhanghang0704@gmail.com
## Copyright (c) 2020
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import torch
import encoding

def test_model_inference():
    x = torch.rand(1, 3, 224, 224)
    for model_name in encoding.models.pretrained_model_list():
        print('Doing: ', model_name)
        if 'wideresnet' in model_name:
            continue  # need multi-gpu
        model = encoding.models.get_model(model_name, pretrained=True)
        model.eval()
        y = model(x)

if __name__ == "__main__":
    import nose
    nose.runmodule()
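Since test_model_inference() walks the entire pretrained_model_list() and downloads every checkpoint, a lighter single-model variant of the same smoke test can be handy. The sketch below is illustrative rather than part of the test suite: it just grabs the first listed name (so the same "wideresnet needs multi-gpu" caveat from the test applies) and wraps inference in no_grad:

    import torch
    import encoding

    model_name = next(iter(encoding.models.pretrained_model_list()))  # any listed name works
    model = encoding.models.get_model(model_name, pretrained=True)    # downloads weights on first use
    model.eval()
    with torch.no_grad():
        y = model(torch.rand(1, 3, 224, 224))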
tests/unit_test/test_utils.py
@@ -19,11 +19,13 @@ def test_segmentation_metrics():
     pixAcc = 1.0 * pixel_correct / (np.spacing(1) + pixel_labeled)
     IoU = 1.0 * area_inter / (np.spacing(1) + area_union)
     mIoU = IoU.mean()
-    print('numpy predictionis :', pixAcc, mIoU)
+    print('numpy predictionis :', pixAcc, mIoU)
     # torch metric prediction
     pixel_correct, pixel_labeled = batch_pix_accuracy(tim_pred, tim_lab)
     area_inter, area_union = batch_intersection_union(tim_pred, tim_lab, nclass)
-    pixAcc = 1.0 * pixel_correct / (np.spacing(1) + pixel_labeled)
+    batch_pixAcc = 1.0 * pixel_correct / (np.spacing(1) + pixel_labeled)
     IoU = 1.0 * area_inter / (np.spacing(1) + area_union)
-    mIoU = IoU.mean()
-    print('torch predictionis :', pixAcc, mIoU)
+    batch_mIoU = IoU.mean()
+    print('torch predictionis :', batch_pixAcc, batch_mIoU)
+    assert (batch_pixAcc - pixAcc) < 1e-3
+    assert (batch_mIoU - mIoU) < 1e-3
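The new assertions compare the hand-rolled numpy metrics against the values from batch_pix_accuracy / batch_intersection_union. As a toy, CPU-only illustration of the two formulas being checked (pixel accuracy and mean IoU), the sketch below uses made-up numbers and leaves out any ignore-index handling the real helpers may do:

    import numpy as np

    pred  = np.array([[0, 1], [1, 2]])
    label = np.array([[0, 1], [2, 2]])
    nclass = 3

    pixel_correct = np.sum(pred == label)                              # 3 of 4 pixels agree
    pixel_labeled = label.size
    pixAcc = 1.0 * pixel_correct / (np.spacing(1) + pixel_labeled)     # ~0.75

    inter = np.array([np.sum((pred == c) & (label == c)) for c in range(nclass)])
    union = np.array([np.sum((pred == c) | (label == c)) for c in range(nclass)])
    mIoU = (1.0 * inter / (np.spacing(1) + union)).mean()              # ~0.667
    print(pixAcc, mIoU)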
Page 1 of 2 (remaining changed files are on the next page).