OpenDAS / mmdetection3d / Commits / 7036cc9f

Commit 7036cc9f, authored Oct 27, 2022 by ChaimZhu, committed by ZwwWayne on Dec 03, 2022

[Fix] fix seg metric issues and circle ci (#1962)

* fix circle ci
* update
* update
parent 2bd4f07e

Showing 5 changed files with 166 additions and 113 deletions
mmdet3d/datasets/s3dis_dataset.py (+0, -1)
mmdet3d/datasets/seg3d_dataset.py (+6, -0)
mmdet3d/evaluation/metrics/seg_metric.py (+111, -88)
tests/test_datasets/test_scannet_dataset.py (+33, -16)
tests/test_evaluation/test_metrics/test_seg_metric.py (+16, -8)
mmdet3d/datasets/s3dis_dataset.py (view file @ 7036cc9f)

@@ -316,7 +316,6 @@ class S3DISSegDataset(_S3DISSegDataset):
                 ignore_index=ignore_index,
                 scene_idxs=scene_idxs[i],
                 test_mode=test_mode,
-                serialize_data=False,
                 **kwargs) for i in range(len(ann_files))
         ]
...
mmdet3d/datasets/seg3d_dataset.py (view file @ 7036cc9f)

@@ -39,6 +39,10 @@ class Seg3DDataset(BaseDataset):
             Defaults to None.
         test_mode (bool): Whether the dataset is in test mode.
             Defaults to False.
+        serialize_data (bool, optional): Whether to hold memory using
+            serialized objects. When enabled, data loader workers can use
+            shared RAM from the master process instead of making a copy.
+            Defaults to False for 3D segmentation datasets.
         load_eval_anns (bool): Whether to load annotations in test_mode,
             the annotation will be saved in `eval_ann_infos`, which can be
             used in Evaluator. Defaults to True.
...
@@ -66,6 +70,7 @@ class Seg3DDataset(BaseDataset):
                  ignore_index: Optional[int] = None,
                  scene_idxs: Optional[Union[str, np.ndarray]] = None,
                  test_mode: bool = False,
+                 serialize_data=False,
                  load_eval_anns: bool = True,
                  file_client_args: dict = dict(backend='disk'),
                  **kwargs) -> None:
...
@@ -115,6 +120,7 @@ class Seg3DDataset(BaseDataset):
             data_prefix=data_prefix,
             pipeline=pipeline,
             test_mode=test_mode,
+            serialize_data=serialize_data,
             **kwargs)
         self.metainfo['seg_label_mapping'] = self.seg_label_mapping
...
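With `serialize_data` now an explicit `Seg3DDataset` kwarg defaulting to False, the per-dataset override removed from `s3dis_dataset.py` above becomes redundant. A minimal sketch of how the flag would appear in a dataset config; the dataset type, paths, and the rationale comment are placeholders/assumptions, not part of this commit:

# Hypothetical config snippet: `serialize_data` is forwarded straight to
# mmengine's BaseDataset. Keeping it False presumably leaves `data_list`
# as plain Python objects, which the `scene_idxs`-based scene resampling
# of segmentation datasets can still manipulate after init.
train_dataset = dict(
    type='ScanNetSegDataset',            # placeholder dataset type
    data_root='data/scannet',            # placeholder path
    ann_file='scannet_infos_train.pkl',  # placeholder file
    pipeline=[],
    serialize_data=False)                # forwarded to BaseDataset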
mmdet3d/evaluation/metrics/seg_metric.py (view file @ 7036cc9f)

 # Copyright (c) OpenMMLab. All rights reserved.
-import warnings
-from typing import Sequence
+import os.path as osp
+import tempfile
+from typing import Dict, Optional, Sequence

-from mmengine.logging import print_log
-from mmeval.metrics import MeanIoU
-from terminaltables import AsciiTable
+import mmcv
+import numpy as np
+from mmengine.evaluator import BaseMetric
+from mmengine.logging import MMLogger

+from mmdet3d.evaluation import seg_eval
 from mmdet3d.registry import METRICS


 @METRICS.register_module()
-class SegMetric(MeanIoU):
-    """A wrapper of ``mmeval.MeanIoU`` for 3D semantic segmentation.
-
-    This wrapper implements the `process` method that parses predictions and
-    labels from inputs. This enables ``mmengine.Evaluator`` to handle the
-    data flow of different tasks through a unified interface.
-
-    In addition, this wrapper also implements the ``evaluate`` method that
-    parses metric results and prints a pretty table of metrics per class.
+class SegMetric(BaseMetric):
+    """3D semantic segmentation evaluation metric.

     Args:
-        dist_backend (str | None): The name of the distributed communication
-            backend. Refer to :class:`mmeval.BaseMetric`.
-            Defaults to 'torch_cuda'.
-        **kwargs: Keyword parameters passed to :class:`mmeval.MeanIoU`.
+        collect_device (str, optional): Device name used for collecting
+            results from different ranks during distributed training.
+            Must be 'cpu' or 'gpu'. Defaults to 'cpu'.
+        prefix (str): The prefix that will be added to the metric names to
+            disambiguate homonymous metrics of different evaluators. If
+            prefix is not provided in the argument, self.default_prefix
+            will be used instead. Defaults to None.
+        pklfile_prefix (str, optional): The prefix of pkl files, including
+            the file path and the prefix of filename, e.g., "a/b/prefix".
+            If not specified, a temp file will be created. Defaults to None.
+        submission_prefix (str, optional): The prefix of submission data.
+            If not specified, the submission data will not be generated.
+            Defaults to None.
     """

-    def __init__(self, dist_backend='torch_cpu', **kwargs):
-        iou_metrics = kwargs.pop('iou_metrics', None)
-        if iou_metrics is not None:
-            warnings.warn(
-                'DeprecationWarning: The `iou_metrics` parameter of '
-                '`IoUMetric` is deprecated, defaults return all metrics now!')
-        collect_device = kwargs.pop('collect_device', None)
-        if collect_device is not None:
-            warnings.warn(
-                'DeprecationWarning: The `collect_device` parameter of '
-                '`IoUMetric` is deprecated, use `dist_backend` instead.')
-
-        # Changes the default value of `classwise_results` to True.
-        super().__init__(
-            classwise_results=True, dist_backend=dist_backend, **kwargs)
+    def __init__(self,
+                 collect_device: str = 'cpu',
+                 prefix: Optional[str] = None,
+                 pklfile_prefix: str = None,
+                 submission_prefix: str = None,
+                 **kwargs):
+        self.pklfile_prefix = pklfile_prefix
+        self.submission_prefix = submission_prefix
+        super(SegMetric, self).__init__(
+            prefix=prefix, collect_device=collect_device)

     def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
         """Process one batch of data samples and predictions.
...
@@ -55,60 +55,83 @@ class SegMetric(MeanIoU):
             data_samples (Sequence[dict]): A batch of outputs from
                 the model.
         """
-        predictions, labels = [], []
         for data_sample in data_samples:
-            # (num_points, ) -> (num_points, 1)
-            pred = data_sample['pred_pts_seg'][
-                'pts_semantic_mask'].unsqueeze(-1)
-            label = data_sample['gt_pts_seg'][
-                'pts_semantic_mask'].unsqueeze(-1)
-            predictions.append(pred)
-            labels.append(label)
-        self.add(predictions, labels)
+            pred_3d = data_sample['pred_pts_seg']
+            eval_ann_info = data_sample['eval_ann_info']
+            cpu_pred_3d = dict()
+            for k, v in pred_3d.items():
+                if hasattr(v, 'to'):
+                    cpu_pred_3d[k] = v.to('cpu').numpy()
+                else:
+                    cpu_pred_3d[k] = v
+            self.results.append((eval_ann_info, cpu_pred_3d))

-    def evaluate(self, *args, **kwargs):
-        """Returns metric results and prints a pretty table of metrics per
-        class.
-
-        This method would be invoked by ``mmengine.Evaluator``.
-        """
-        metric_results = self.compute(*args, **kwargs)
-        self.reset()
-
-        classwise_results = metric_results['classwise_results']
-        del metric_results['classwise_results']
-
-        # Ascii table of the metric results per class.
-        header = ['Class']
-        header += classwise_results.keys()
-        classes = self.dataset_meta['classes']
-        table_data = [header]
-        for i in range(self.num_classes):
-            row_data = [classes[i]]
-            for _, value in classwise_results.items():
-                row_data.append(f'{value[i] * 100:.2f}')
-            table_data.append(row_data)
-        table = AsciiTable(table_data)
-        print_log('per class results:', logger='current')
-        print_log('\n' + table.table, logger='current')
-
-        # Ascii table of the metric results overall.
-        header = ['Class']
-        header += metric_results.keys()
-        table_data = [header]
-        row_data = ['results']
-        for _, value in metric_results.items():
-            row_data.append(f'{value * 100:.2f}')
-        table_data.append(row_data)
-        table = AsciiTable(table_data)
-        table.inner_footing_row_border = True
-        print_log('overall results:', logger='current')
-        print_log('\n' + table.table, logger='current')
-
-        # Multiply value by 100 to convert to percentage and round.
-        evaluate_results = {
-            k: round(v * 100, 2)
-            for k, v in metric_results.items()
-        }
-        return evaluate_results
+    def format_results(self, results):
+        r"""Format the results to txt file. Refer to `ScanNet documentation
+        <http://kaldir.vc.in.tum.de/scannet_benchmark/documentation>`_.
+
+        Args:
+            outputs (list[dict]): Testing results of the dataset.
+
+        Returns:
+            tuple: (outputs, tmp_dir), outputs is the detection results,
+                tmp_dir is the temporal directory created for saving
+                submission files when ``submission_prefix`` is not specified.
+        """
+        submission_prefix = self.submission_prefix
+        if submission_prefix is None:
+            tmp_dir = tempfile.TemporaryDirectory()
+            submission_prefix = osp.join(tmp_dir.name, 'results')
+        mmcv.mkdir_or_exist(submission_prefix)
+        ignore_index = self.dataset_meta['ignore_index']
+        # need to map network output to original label idx
+        cat2label = np.zeros(len(self.dataset_meta['label2cat'])).astype(
+            np.int)
+        for original_label, output_idx in self.dataset_meta[
+                'label2cat'].items():
+            if output_idx != ignore_index:
+                cat2label[output_idx] = original_label
+
+        for i, (eval_ann, result) in enumerate(results):
+            sample_idx = eval_ann['point_cloud']['lidar_idx']
+            pred_sem_mask = result['semantic_mask'].numpy().astype(np.int)
+            pred_label = cat2label[pred_sem_mask]
+            curr_file = f'{submission_prefix}/{sample_idx}.txt'
+            np.savetxt(curr_file, pred_label, fmt='%d')
+
+    def compute_metrics(self, results: list) -> Dict[str, float]:
+        """Compute the metrics from processed results.
+
+        Args:
+            results (list): The processed results of each batch.
+
+        Returns:
+            Dict[str, float]: The computed metrics. The keys are the names
+            of the metrics, and the values are corresponding results.
+        """
+        logger: MMLogger = MMLogger.get_current_instance()
+
+        if self.submission_prefix:
+            self.format_results(results)
+            return None
+
+        label2cat = self.dataset_meta['label2cat']
+        ignore_index = self.dataset_meta['ignore_index']
+
+        gt_semantic_masks = []
+        pred_semantic_masks = []
+
+        for eval_ann, single_pred_results in results:
+            gt_semantic_masks.append(eval_ann['pts_semantic_mask'])
+            pred_semantic_masks.append(
+                single_pred_results['pts_semantic_mask'])
+
+        ret_dict = seg_eval(
+            gt_semantic_masks,
+            pred_semantic_masks,
+            label2cat,
+            ignore_index,
+            logger=logger)
+
+        return ret_dict
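The least obvious step in the new `format_results` is the inverse label mapping. A standalone re-enactment with toy values (numbers are made up, not from the commit): as the loop reads it, `label2cat` maps an original dataset label to the network's output index, and inverting it gives an array that converts a predicted mask back to original label ids before the per-scene txt files are written.

import numpy as np

label2cat = {1: 0, 3: 1, 5: 2}  # original label -> output index (toy)
ignore_index = 255

# Equivalent to the `cat2label` construction above; np.int64 is used
# here since bare `np.int` is deprecated in recent numpy releases.
cat2label = np.zeros(len(label2cat), dtype=np.int64)
for original_label, output_idx in label2cat.items():
    if output_idx != ignore_index:
        cat2label[output_idx] = original_label

pred_sem_mask = np.array([0, 2, 1, 0])  # network output indices
print(cat2label[pred_sem_mask])         # -> [1 5 3 1], original label ids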
tests/test_datasets/test_scannet_dataset.py (view file @ 7036cc9f)

@@ -39,7 +39,7 @@ def _generate_scannet_seg_dataset_config():
         [227, 119, 194],
         [82, 84, 163],
     ]
-    scene_idxs = [0 for _ in range(20)]
+    scene_idxs = [0]
     modality = dict(use_lidar=True, use_camera=False)
     pipeline = [
         dict(
...
@@ -83,22 +83,39 @@ def _generate_scannet_dataset_config():
                'bookshelf', 'picture', 'counter', 'desk', 'curtain',
                'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',
                'garbagebin')
-    # TODO add pipeline
+    from mmcv.transforms.base import BaseTransform
+    from mmengine.registry import TRANSFORMS
+    if 'Identity' not in TRANSFORMS:
+
+        @TRANSFORMS.register_module()
+        class Identity(BaseTransform):
+
+            def transform(self, info):
+                if 'ann_info' in info:
+                    info['gt_labels_3d'] = info['ann_info']['gt_labels_3d']
+                return info
+
     modality = dict(use_lidar=True, use_camera=False)
     pipeline = [
+        dict(type='Identity'),
         dict(
             type='LoadPointsFromFile',
             coord_type='DEPTH',
             shift_height=True,
             load_dim=6,
             use_dim=[0, 1, 2]),
         dict(
             type='LoadAnnotations3D',
             with_bbox_3d=True,
             with_label_3d=True,
             with_mask_3d=True,
             with_seg_3d=True),
         dict(type='GlobalAlignment', rotation_axis=2),
         dict(type='PointSegClassMapping'),
         dict(type='PointSample', num_points=5),
         dict(
             type='RandomFlip3D',
             sync_2d=False,
             flip_ratio_bev_horizontal=1.0,
             flip_ratio_bev_vertical=1.0),
         dict(
             type='GlobalRotScaleTrans',
             rot_range=[-0.087266, 0.087266],
             scale_ratio_range=[1.0, 1.0],
             shift_height=True),
         dict(
             type='Pack3DDetInputs',
             keys=[
                 'points', 'pts_semantic_mask', 'gt_bboxes_3d',
                 'gt_labels_3d', 'pts_instance_mask'
             ])
     ]
     data_prefix = dict(
         pts='points',
...
@@ -113,7 +130,7 @@ class TestScanNetDataset(unittest.TestCase):
         np.random.seed(0)
         data_root, ann_file, classes, data_prefix, \
             pipeline, modality, = _generate_scannet_dataset_config()
         register_all_modules()
         scannet_dataset = ScanNetDataset(
             data_root,
             ann_file,
...
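A small sketch of what the test's new pipeline entry does once the module above has registered `Identity` (toy input, not from the commit): the registry builds the transform from its config dict, and calling it copies `gt_labels_3d` out of `ann_info` so later transforms can consume it.

from mmengine.registry import TRANSFORMS

# Assumes the test module above has run, so 'Identity' is registered.
identity = TRANSFORMS.build(dict(type='Identity'))
info = dict(ann_info=dict(gt_labels_3d=[0, 2]))
print(identity(info)['gt_labels_3d'])  # -> [0, 2]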
tests/test_evaluation/test_metrics/test_seg_metric.py (view file @ 7036cc9f)

 # Copyright (c) OpenMMLab. All rights reserved.
 import unittest

+import numpy as np
 import torch
 from mmengine.structures import BaseDataElement
...
@@ -12,18 +13,19 @@ class TestSegMetric(unittest.TestCase):

     def _demo_mm_model_output(self):
         """Create a superset of inputs needed to run test or train
         batches."""
-        pred_pts_semantic_mask = torch.LongTensor([
+        pred_pts_semantic_mask = torch.Tensor([
             0, 0, 1, 0, 0, 2, 1, 3, 1, 2, 1, 0, 2, 2, 2, 2, 1, 3, 0, 3, 3,
             3, 3
         ])
         pred_pts_seg_data = dict(pts_semantic_mask=pred_pts_semantic_mask)
         data_sample = Det3DDataSample()
         data_sample.pred_pts_seg = PointData(**pred_pts_seg_data)

-        gt_pts_semantic_mask = torch.LongTensor(([
-            0, 0, 0, 4, 0, 0, 1, 1, 1, 4, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3,
-            3, 4
-        ]))
-        gt_pts_seg_data = dict(pts_semantic_mask=gt_pts_semantic_mask)
-        data_sample.gt_pts_seg = PointData(**gt_pts_seg_data)
+        gt_pts_semantic_mask = np.array([
+            0, 0, 0, 255, 0, 0, 1, 1, 1, 255, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3,
+            3, 3, 255
+        ])
+        ann_info_data = dict(pts_semantic_mask=gt_pts_semantic_mask)
+        data_sample.eval_ann_info = ann_info_data

         batch_data_samples = [data_sample]
...
@@ -38,8 +40,14 @@ class TestSegMetric(unittest.TestCase):

     def test_evaluate(self):
         data_batch = {}
         predictions = self._demo_mm_model_output()
-        dataset_meta = dict(classes=('car', 'bicyle', 'motorcycle', 'truck'))
-        seg_metric = SegMetric(ignore_index=len(dataset_meta['classes']))
+        label2cat = {
+            0: 'car',
+            1: 'bicycle',
+            2: 'motorcycle',
+            3: 'truck',
+        }
+        dataset_meta = dict(label2cat=label2cat, ignore_index=255)
+        seg_metric = SegMetric()
         seg_metric.dataset_meta = dataset_meta
         seg_metric.process(data_batch, predictions)
         res = seg_metric.evaluate(1)
...
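For context, a hedged sketch of how the reworked metric would typically be declared in a dev-1.x config; the exact variable names follow the usual mmengine convention and are an assumption here, and the runner attaches `dataset_meta` (with `label2cat` and `ignore_index`, as the test above does manually) before evaluation.

# Hypothetical config snippet; SegMetric is looked up in the METRICS
# registry by the evaluator.
val_evaluator = dict(type='SegMetric')
test_evaluator = val_evaluator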