ModelZoo / SOLOv2-pytorch · Commits · 976629d4

Commit 976629d4, authored Jun 17, 2019 by Cao Yuhang

support segm evaluation using different score from bbox det

Parent: dc341cb8
Showing 3 changed files with 52 additions and 23 deletions:

- mmdet/core/evaluation/coco_utils.py (+39 -11)
- mmdet/core/evaluation/eval_hooks.py (+7 -6)
- tools/test.py (+6 -6)
mmdet/core/evaluation/coco_utils.py

@@ -6,7 +6,7 @@ from pycocotools.cocoeval import COCOeval
 from .recall import eval_recalls
 
 
-def coco_eval(result_file, result_types, coco, max_dets=(100, 300, 1000)):
+def coco_eval(result_files, result_types, coco, max_dets=(100, 300, 1000)):
     for res_type in result_types:
         assert res_type in [
             'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'
@@ -17,16 +17,17 @@ def coco_eval(result_file, result_types, coco, max_dets=(100, 300, 1000)):
     assert isinstance(coco, COCO)
 
     if result_types == ['proposal_fast']:
-        ar = fast_eval_recall(result_file, coco, np.array(max_dets))
+        ar = fast_eval_recall(result_files, coco, np.array(max_dets))
         for i, num in enumerate(max_dets):
             print('AR@{}\t= {:.4f}'.format(num, ar[i]))
         return
 
-    assert result_file.endswith('.json')
-    coco_dets = coco.loadRes(result_file)
-    img_ids = coco.getImgIds()
     for res_type in result_types:
+        result_file = result_files[res_type]
+        assert result_file.endswith('.json')
+        coco_dets = coco.loadRes(result_file)
+        img_ids = coco.getImgIds()
         iou_type = 'bbox' if res_type == 'proposal' else res_type
         cocoEval = COCOeval(coco, coco_dets, iou_type)
         cocoEval.params.imgIds = img_ids
@@ -118,32 +119,59 @@ def det2json(dataset, results):
 def segm2json(dataset, results):
-    json_results = []
+    bbox_json_results = []
+    segm_json_results = []
     for idx in range(len(dataset)):
         img_id = dataset.img_ids[idx]
         det, seg = results[idx]
         for label in range(len(det)):
+            # bbox results
             bboxes = det[label]
-            segms = seg[label]
             for i in range(bboxes.shape[0]):
                 data = dict()
                 data['image_id'] = img_id
                 data['bbox'] = xyxy2xywh(bboxes[i])
                 data['score'] = float(bboxes[i][4])
                 data['category_id'] = dataset.cat_ids[label]
+                bbox_json_results.append(data)
+
+            # segm results
+            # some detectors use different score for det and segm
+            if len(seg) == 2:
+                segms = seg[0][label]
+                mask_score = seg[1][label]
+            else:
+                segms = seg[label]
+                mask_score = [bbox[4] for bbox in bboxes]
+            for i in range(bboxes.shape[0]):
+                data = dict()
+                data['image_id'] = img_id
+                data['score'] = float(mask_score[i])
+                data['category_id'] = dataset.cat_ids[label]
                 segms[i]['counts'] = segms[i]['counts'].decode()
                 data['segmentation'] = segms[i]
-                json_results.append(data)
-    return json_results
+                segm_json_results.append(data)
+    return bbox_json_results, segm_json_results
 
 
 def results2json(dataset, results, out_file):
+    result_files = dict()
     if isinstance(results[0], list):
         json_results = det2json(dataset, results)
+        result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
+        result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
+        mmcv.dump(json_results, result_files['bbox'])
     elif isinstance(results[0], tuple):
         json_results = segm2json(dataset, results)
+        result_files['bbox'] = '{}.{}.json'.format(out_file, 'bbox')
+        result_files['proposal'] = '{}.{}.json'.format(out_file, 'bbox')
+        result_files['segm'] = '{}.{}.json'.format(out_file, 'segm')
+        mmcv.dump(json_results[0], result_files['bbox'])
+        mmcv.dump(json_results[1], result_files['segm'])
     elif isinstance(results[0], np.ndarray):
         json_results = proposal2json(dataset, results)
+        result_files['proposal'] = '{}.{}.json'.format(out_file, 'proposal')
+        mmcv.dump(json_results, result_files['proposal'])
     else:
         raise TypeError('invalid type of results')
-    mmcv.dump(json_results, out_file)
+    return result_files
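The `len(seg) == 2` branch above is the heart of the change: `segm2json` now accepts a per-image `seg` that is either the usual per-class list of RLE masks or a 2-tuple of (masks, mask scores). A minimal sketch of the two layouts a detector could return, assuming a single foreground class; the variable names and the placeholder RLE string are illustrative, not part of the commit:

# Illustrative only: the two per-image result layouts the updated segm2json()
# accepts. `det` is the usual per-class list of (n, 5) [x1, y1, x2, y2, score]
# arrays; the RLE 'counts' value is a placeholder.
import numpy as np

det = [np.array([[10., 10., 50., 60., 0.9]])]               # one class, one box
rles = [[{'size': [480, 640], 'counts': b'placeholder'}]]   # per-class RLE masks

# Layout 1: masks reuse the box score -> seg is just the per-class RLE list,
# so mask_score falls back to [bbox[4] for bbox in bboxes].
result_same_score = (det, rles)

# Layout 2: the detector scores masks separately -> seg is (rles, mask_scores),
# and len(seg) == 2 selects the new branch that uses mask_score[i].
mask_scores = [np.array([0.8])]
result_own_score = (det, (rles, mask_scores))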
mmdet/core/evaluation/eval_hooks.py

@@ -135,15 +135,15 @@ class CocoDistEvalRecallHook(DistEvalHook):
 class CocoDistEvalmAPHook(DistEvalHook):
 
     def evaluate(self, runner, results):
-        tmp_file = osp.join(runner.work_dir, 'temp_0.json')
-        results2json(self.dataset, results, tmp_file)
+        tmp_file = osp.join(runner.work_dir, 'temp_0')
+        result_files = results2json(self.dataset, results, tmp_file)
 
         res_types = ['bbox',
                      'segm'] if runner.model.module.with_mask else ['bbox']
         cocoGt = self.dataset.coco
-        cocoDt = cocoGt.loadRes(tmp_file)
         imgIds = cocoGt.getImgIds()
         for res_type in res_types:
+            cocoDt = cocoGt.loadRes(result_files[res_type])
             iou_type = res_type
             cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
             cocoEval.params.imgIds = imgIds
@@ -159,4 +159,5 @@ class CocoDistEvalmAPHook(DistEvalHook):
                 '{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                 '{ap[4]:.3f} {ap[5]:.3f}').format(ap=cocoEval.stats[:6])
         runner.log_buffer.ready = True
-        os.remove(tmp_file)
+        for res_type in res_types:
+            os.remove(result_files[res_type])
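With `tmp_file` now a prefix rather than a single `.json` path, `results2json` writes one file per result type under `runner.work_dir` and the hook removes each of them afterwards. A sketch (not code from the commit) of the file names implied by the `'{}.{}.json'.format(out_file, ...)` pattern in `coco_utils.py`:

out_file = 'temp_0'                                      # the hook's tmp_file prefix
result_files = {
    'bbox': '{}.{}.json'.format(out_file, 'bbox'),       # -> temp_0.bbox.json
    'proposal': '{}.{}.json'.format(out_file, 'bbox'),   # proposals share the bbox file
    'segm': '{}.{}.json'.format(out_file, 'segm'),       # -> temp_0.segm.json
}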
tools/test.py

@@ -184,16 +184,16 @@ def main():
                 coco_eval(result_file, eval_types, dataset.coco)
             else:
                 if not isinstance(outputs[0], dict):
-                    result_file = args.out + '.json'
-                    results2json(dataset, outputs, result_file)
-                    coco_eval(result_file, eval_types, dataset.coco)
+                    result_files = results2json(dataset, outputs, args.out)
+                    coco_eval(result_files, eval_types, dataset.coco)
                 else:
                     for name in outputs[0]:
                         print('\nEvaluating {}'.format(name))
                         outputs_ = [out[name] for out in outputs]
-                        result_file = args.out + '.{}.json'.format(name)
-                        results2json(dataset, outputs_, result_file)
-                        coco_eval(result_file, eval_types, dataset.coco)
+                        result_file = args.out + '.{}'.format(name)
+                        result_files = results2json(dataset, outputs_,
+                                                    result_file)
+                        coco_eval(result_files, eval_types, dataset.coco)
 
 
 if __name__ == '__main__':
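Taken together, the commit changes the contract between `results2json` and `coco_eval` from a single JSON path to a dict of per-type paths. A hedged sketch of re-running evaluation offline from detections cached by `tools/test.py --out`; the helper name, the pickle path, and the assumption that a COCO-style dataset object is already built are illustrative, not part of the commit:

import mmcv
# Assumed import path; adjust to wherever coco_eval/results2json are exposed in this repo.
from mmdet.core import coco_eval, results2json


def evaluate_offline(dataset, pkl_path, out_prefix='results'):
    """Re-run COCO evaluation from cached detections (illustrative helper).

    `dataset` is an already-built COCO-style dataset (has .coco, .img_ids,
    .cat_ids); constructing it from a test config is omitted here.
    """
    outputs = mmcv.load(pkl_path)
    # results2json now writes '<prefix>.bbox.json' / '<prefix>.segm.json'
    # and returns their paths keyed by result type.
    result_files = results2json(dataset, outputs, out_prefix)
    # coco_eval consumes that dict and looks up the file for each type.
    coco_eval(result_files, ['bbox', 'segm'], dataset.coco)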