ModelZoo / SOLOv2-pytorch · Commits

Commit 453e151f, authored Mar 28, 2020 by WXinlong
Parent: 695fcddd

    add SOLO

Showing 1 of 21 changed files: 312 additions and 0 deletions.

tools/test_ins_vis.py (new file, mode 100644): +312 −0
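# Test-time visualization script for SOLO instance segmentation: runs a trained
# detector over the test set and saves per-image mask overlays to --save_dir.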
import argparse
import os
import os.path as osp
import shutil
import tempfile

from scipy import ndimage

import mmcv
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import init_dist, get_dist_info, load_checkpoint

from mmdet.core import (coco_eval, results2json, wrap_fp16_model, tensor2imgs,
                        get_classes)
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector

import cv2
import numpy as np
import matplotlib.cm as cm

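# vis_seg: overlay the predicted instance masks of one batch on the
# de-normalized input images and write each result to '<save_dir>/<data_id>.jpg'.
# Masks are sorted by area and painted from largest to smallest so that small
# instances stay visible; the class name is drawn at each mask's center of mass.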
def vis_seg(data, result, img_norm_cfg, data_id, colors, score_thr, save_dir):
    img_tensor = data['img'][0]
    img_metas = data['img_meta'][0].data[0]
    imgs = tensor2imgs(img_tensor, **img_norm_cfg)
    assert len(imgs) == len(img_metas)
    class_names = get_classes('coco')

    for img, img_meta, cur_result in zip(imgs, img_metas, result):
        if cur_result is None:
            continue
        h, w, _ = img_meta['img_shape']
        img_show = img[:h, :w, :]

        seg_label = cur_result[0]
        seg_label = seg_label.cpu().numpy().astype(np.uint8)
        cate_label = cur_result[1]
        cate_label = cate_label.cpu().numpy()
        score = cur_result[2].cpu().numpy()

        vis_inds = score > score_thr
        seg_label = seg_label[vis_inds]
        num_mask = seg_label.shape[0]
        cate_label = cate_label[vis_inds]
        cate_score = score[vis_inds]

        mask_density = []
        for idx in range(num_mask):
            cur_mask = seg_label[idx, :, :]
            cur_mask = mmcv.imresize(cur_mask, (w, h))
            cur_mask = (cur_mask > 0.5).astype(np.int32)
            mask_density.append(cur_mask.sum())
        orders = np.argsort(mask_density)
        seg_label = seg_label[orders]
        cate_label = cate_label[orders]
        cate_score = cate_score[orders]

        seg_show = img_show.copy()
        for idx in range(num_mask):
            idx = -(idx + 1)
            cur_mask = seg_label[idx, :, :]
            cur_mask = mmcv.imresize(cur_mask, (w, h))
            cur_mask = (cur_mask > 0.5).astype(np.uint8)
            if cur_mask.sum() == 0:
                continue
            color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            cur_mask_bool = cur_mask.astype(bool)
            seg_show[cur_mask_bool] = \
                img_show[cur_mask_bool] * 0.5 + color_mask * 0.5

        for idx in range(num_mask):
            idx = -(idx + 1)
            cur_mask = seg_label[idx, :, :]
            cur_mask = mmcv.imresize(cur_mask, (w, h))
            cur_mask = (cur_mask > 0.5).astype(np.uint8)
            if cur_mask.sum() == 0:
                continue
            cur_cate = cate_label[idx]
            cur_score = cate_score[idx]
            label_text = class_names[cur_cate]
            #label_text += '|{:.02f}'.format(cur_score)
            # center
            center_y, center_x = ndimage.measurements.center_of_mass(cur_mask)
            vis_pos = (max(int(center_x) - 10, 0), int(center_y))
            cv2.putText(seg_show, label_text, vis_pos,
                        cv2.FONT_HERSHEY_COMPLEX, 0.3, (255, 255, 255))  # white
        mmcv.imwrite(seg_show, '{}/{}.jpg'.format(save_dir, data_id))

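# single_gpu_test: run inference image by image on a single GPU and visualize
# every prediction via vis_seg. Raw detections are not collected (None is
# appended per image); this script is meant for visualization only.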
def single_gpu_test(model, data_loader, args, cfg=None, verbose=True):
    model.eval()
    results = []
    dataset = data_loader.dataset

    class_num = 1000  # ins
    colors = [(np.random.random((1, 3)) * 255).tolist()[0]
              for i in range(class_num)]

    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            seg_result = model(return_loss=False, rescale=True, **data)
            result = None
        results.append(result)

        if verbose:
            vis_seg(data, seg_result, cfg.img_norm_cfg, data_id=i,
                    colors=colors, score_thr=args.score_thr,
                    save_dir=args.save_dir)

        batch_size = data['img'][0].size(0)
        for _ in range(batch_size):
            prog_bar.update()
    return results

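# multi_gpu_test: distributed inference; each rank processes its shard of the
# test set and the per-rank results are merged back into dataset order by
# collect_results below.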
def multi_gpu_test(model, data_loader, tmpdir=None):
    model.eval()
    results = []
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        results.append(result)

        if rank == 0:
            batch_size = data['img'][0].size(0)
            for _ in range(batch_size * world_size):
                prog_bar.update()

    # collect results from all ranks
    results = collect_results(results, len(dataset), tmpdir)

    return results

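# collect_results: gather per-rank results through a shared temporary directory.
# Rank 0 creates the directory and broadcasts its name as a fixed-length uint8
# tensor; every rank dumps its partial results there, after which rank 0
# reloads, interleaves, and truncates them to the dataset size and removes
# the directory.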
def collect_results(result_part, size, tmpdir=None):
    rank, world_size = get_dist_info()
    # create a tmp dir if it is not specified
    if tmpdir is None:
        MAX_LEN = 512
        # 32 is whitespace
        dir_tensor = torch.full((MAX_LEN, ), 32,
                                dtype=torch.uint8, device='cuda')
        if rank == 0:
            tmpdir = tempfile.mkdtemp()
            tmpdir = torch.tensor(
                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # dump the part result to the dir
    mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
    dist.barrier()
    # collect all parts
    if rank != 0:
        return None
    else:
        # load results of all parts from tmp dir
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
            part_list.append(mmcv.load(part_file))
        # sort the results
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        # remove tmp dir
        shutil.rmtree(tmpdir)
        return ordered_results

def parse_args():
    parser = argparse.ArgumentParser(description='MMDet test detector')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file')
    parser.add_argument(
        '--json_out',
        help='output result file name without extension',
        type=str)
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
        help='eval types')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--score_thr',
        type=float,
        default=0.3,
        help='score threshold for visualization')
    parser.add_argument('--tmpdir', help='tmp dir for writing some results')
    parser.add_argument(
        '--save_dir', help='dir for saving visualized images')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args

def main():
    args = parse_args()

    assert args.out or args.show or args.json_out, \
        ('Please specify at least one operation (save or show the results) '
         'with the argument "--out" or "--show" or "--json_out"')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    if args.json_out is not None and args.json_out.endswith('.json'):
        args.json_out = args.json_out[:-5]

    cfg = mmcv.Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        imgs_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint['meta']:
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES

    assert not distributed
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args, cfg=cfg)
    else:
        model = MMDistributedDataParallel(model.cuda())
        outputs = multi_gpu_test(model, data_loader, args.tmpdir)

    rank, _ = get_dist_info()
    if args.out and rank == 0:
        print('\nwriting results to {}'.format(args.out))
        mmcv.dump(outputs, args.out)
        eval_types = args.eval
        if eval_types:
            print('Starting to evaluate {}'.format(' and '.join(eval_types)))
            if eval_types == ['proposal_fast']:
                result_file = args.out
                coco_eval(result_file, eval_types, dataset.coco)
            else:
                if not isinstance(outputs[0], dict):
                    result_files = results2json(dataset, outputs, args.out)
                    coco_eval(result_files, eval_types, dataset.coco)
                else:
                    for name in outputs[0]:
                        print('\nEvaluating {}'.format(name))
                        outputs_ = [out[name] for out in outputs]
                        result_file = args.out + '.{}'.format(name)
                        result_files = results2json(dataset, outputs_,
                                                    result_file)
                        coco_eval(result_files, eval_types, dataset.coco)

    # Save predictions in the COCO json format
    if args.json_out and rank == 0:
        if not isinstance(outputs[0], dict):
            results2json(dataset, outputs, args.json_out)
        else:
            for name in outputs[0]:
                outputs_ = [out[name] for out in outputs]
                result_file = args.json_out + '.{}'.format(name)
                results2json(dataset, outputs_, result_file)


if __name__ == '__main__':
    main()
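For reference, the script follows mmdetection's standard test-script interface. A minimal single-GPU visualization run would look like the sketch below; the config, checkpoint, and output paths are placeholders, not files added by this commit:

    python tools/test_ins_vis.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \
        --show --save_dir vis_solo --score_thr 0.3

This writes one '<image index>.jpg' mask overlay per test image into the directory passed as --save_dir.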