Commit 35667791 authored by ZCMax's avatar ZCMax Committed by ChaimZhu
Browse files

[Refactor] Refactor 3D seg and instance seg metric

parent 2a7030a5
# Copyright (c) OpenMMLab. All rights reserved.
from .indoor_metric import IndoorMetric # noqa: F401,F403
from .instance_seg_metric import InstanceSegMetric # noqa: F401,F403
from .kitti_metric import KittiMetric # noqa: F401,F403
from .lyft_metric import LyftMetric # noqa: F401,F403
from .nuscenes_metric import NuScenesMetric # noqa: F401,F403
from .seg_metric import SegMetric # noqa: F401,F403
# Public API of the metrics package. NOTE: this was previously misspelled
# ``__all_`` (and assigned twice, leaving dead code), so ``__all__`` was never
# actually defined and ``from ... import *`` exported everything.
__all__ = [
    'KittiMetric', 'NuScenesMetric', 'IndoorMetric', 'LyftMetric', 'SegMetric',
    'InstanceSegMetric'
]
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Sequence
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger
from mmdet3d.core import instance_seg_eval
from mmdet3d.registry import METRICS
@METRICS.register_module()
class InstanceSegMetric(BaseMetric):
    """3D instance segmentation evaluation metric.

    Args:
        collect_device (str, optional): Device name used for collecting
            results from different ranks during distributed training.
            Must be 'cpu' or 'gpu'. Defaults to 'cpu'.
        prefix (str): The prefix that will be added in the metric
            names to disambiguate homonymous metrics of different evaluators.
            If prefix is not provided in the argument, self.default_prefix
            will be used instead. Default: None
    """

    def __init__(self,
                 collect_device: str = 'cpu',
                 prefix: Optional[str] = None,
                 **kwargs):
        super(InstanceSegMetric, self).__init__(
            prefix=prefix, collect_device=collect_device)

    def process(self, data_batch: Sequence[dict],
                predictions: Sequence[dict]) -> None:
        """Process one batch of data samples and predictions.

        The processed results should be stored in ``self.results``,
        which will be used to compute the metrics when all batches
        have been processed.

        Args:
            data_batch (Sequence[dict]): A batch of data
                from the dataloader.
            predictions (Sequence[dict]): A batch of outputs from
                the model.
        """
        batch_eval_anns = [
            item['data_sample']['eval_ann_info'] for item in data_batch
        ]
        for eval_ann, pred_dict in zip(batch_eval_anns, predictions):
            pred_3d = pred_dict['pred_pts_seg']
            # Move tensor-like entries to CPU so results can be safely
            # collected across ranks; non-tensor values are kept as-is.
            cpu_pred_3d = dict()
            for k, v in pred_3d.items():
                if hasattr(v, 'to'):
                    cpu_pred_3d[k] = v.to('cpu')
                else:
                    cpu_pred_3d[k] = v
            self.results.append((eval_ann, cpu_pred_3d))

    def compute_metrics(self, results: list) -> Dict[str, float]:
        """Compute the metrics from processed results.

        Args:
            results (list): The processed results of each batch, a list of
                ``(eval_ann_info, cpu_pred_dict)`` tuples appended by
                :meth:`process`.

        Returns:
            Dict[str, float]: The computed metrics. The keys are the names of
            the metrics, and the values are corresponding results.
        """
        logger: MMLogger = MMLogger.get_current_instance()
        self.classes = self.dataset_meta['CLASSES']
        self.valid_class_ids = self.dataset_meta['VALID_CLASS_IDS']

        gt_semantic_masks = []
        gt_instance_masks = []
        pred_instance_masks = []
        pred_instance_labels = []
        pred_instance_scores = []

        for eval_ann, single_pred_results in results:
            gt_semantic_masks.append(eval_ann['pts_semantic_mask'])
            gt_instance_masks.append(eval_ann['pts_instance_mask'])
            # Bugfix: predictions are packed under 'pts_instance_mask',
            # 'instance_labels' and 'instance_scores' (see the PointData
            # built in the corresponding unit test); the previous keys
            # ('pts_intance_mask', 'instance_label', 'instance_score')
            # raised KeyError.
            pred_instance_masks.append(
                single_pred_results['pts_instance_mask'])
            pred_instance_labels.append(
                single_pred_results['instance_labels'])
            pred_instance_scores.append(
                single_pred_results['instance_scores'])

        ret_dict = instance_seg_eval(
            gt_semantic_masks,
            gt_instance_masks,
            pred_instance_masks,
            pred_instance_labels,
            pred_instance_scores,
            valid_class_ids=self.valid_class_ids,
            class_labels=self.classes,
            logger=logger)

        return ret_dict
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Dict, Optional, Sequence
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger
from mmdet3d.core import seg_eval
from mmdet3d.registry import METRICS
@METRICS.register_module()
class SegMetric(BaseMetric):
    """3D semantic segmentation evaluation metric.

    Args:
        collect_device (str, optional): Device name used for collecting
            results from different ranks during distributed training.
            Must be 'cpu' or 'gpu'. Defaults to 'cpu'.
        prefix (str): The prefix that will be added in the metric
            names to disambiguate homonymous metrics of different evaluators.
            If prefix is not provided in the argument, self.default_prefix
            will be used instead. Default: None
    """

    def __init__(self,
                 collect_device: str = 'cpu',
                 prefix: Optional[str] = None,
                 **kwargs):
        super(SegMetric, self).__init__(
            prefix=prefix, collect_device=collect_device)

    def process(self, data_batch: Sequence[dict],
                predictions: Sequence[dict]) -> None:
        """Process one batch of data samples and predictions.

        The processed results are accumulated in ``self.results`` and later
        consumed by :meth:`compute_metrics` once every batch has been seen.

        Args:
            data_batch (Sequence[dict]): A batch of data
                from the dataloader.
            predictions (Sequence[dict]): A batch of outputs from
                the model.
        """
        eval_anns = [
            sample['data_sample']['eval_ann_info'] for sample in data_batch
        ]
        for ann_info, pred in zip(eval_anns, predictions):
            seg_pred = pred['pred_pts_seg']
            # Bring every tensor-like entry back to CPU before storing;
            # anything without a ``.to`` method is kept untouched.
            seg_pred_cpu = {
                key: val.to('cpu') if hasattr(val, 'to') else val
                for key, val in seg_pred.items()
            }
            self.results.append((ann_info, seg_pred_cpu))

    def compute_metrics(self, results: list) -> Dict[str, float]:
        """Compute the metrics from processed results.

        Args:
            results (list): The processed results of each batch.

        Returns:
            Dict[str, float]: The computed metrics. The keys are the names of
            the metrics, and the values are corresponding results.
        """
        logger: MMLogger = MMLogger.get_current_instance()
        label2cat = self.dataset_meta['label2cat']
        ignore_index = self.dataset_meta['ignore_index']

        gt_masks = [ann['pts_semantic_mask'] for ann, _ in results]
        pred_masks = [pred['pts_semantic_mask'] for _, pred in results]

        return seg_eval(
            gt_masks,
            pred_masks,
            label2cat,
            ignore_index,
            logger=logger)
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import numpy as np
import torch
from mmengine.data import BaseDataElement
from mmdet3d.core.data_structures import Det3DDataSample, PointData
from mmdet3d.metrics import InstanceSegMetric
class TestInstanceSegMetric(unittest.TestCase):
    """Unit tests for :class:`InstanceSegMetric`."""

    def _demo_mm_inputs(self):
        """Create a superset of inputs needed to run test or train batches."""
        packed_inputs = []
        results_dict = dict()
        mm_inputs = dict()
        n_points = 3300
        gt_labels = [0, 0, 0, 0, 0, 0, 14, 14, 2, 1]
        # Bugfix: ``np.int`` is a deprecated alias removed in NumPy 1.24;
        # use the explicit ``np.int64`` dtype instead.
        gt_instance_mask = np.ones(n_points, dtype=np.int64) * -1
        gt_semantic_mask = np.ones(n_points, dtype=np.int64) * -1
        # Assign 300 consecutive points to each ground-truth instance.
        for i, gt_label in enumerate(gt_labels):
            begin = i * 300
            end = begin + 300
            gt_instance_mask[begin:end] = i
            gt_semantic_mask[begin:end] = gt_label

        results_dict['pts_instance_mask'] = torch.tensor(gt_instance_mask)
        results_dict['pts_semantic_mask'] = torch.tensor(gt_semantic_mask)
        data_sample = Det3DDataSample()
        data_sample.gt_pts_seg = PointData(**results_dict)
        mm_inputs['data_sample'] = data_sample.to_dict()
        packed_inputs.append(mm_inputs)
        return packed_inputs

    def _demo_mm_model_output(self):
        """Create a superset of inputs needed to run test or train batches."""
        results_dict = dict()
        n_points = 3300
        gt_labels = [0, 0, 0, 0, 0, 0, 14, 14, 2, 1]
        # Same dtype fix as in ``_demo_mm_inputs``.
        pred_instance_mask = np.ones(n_points, dtype=np.int64) * -1
        labels = []
        scores = []
        # Perfect predictions: reproduce the ground-truth layout with
        # high confidence so the metric has something meaningful to score.
        for i, gt_label in enumerate(gt_labels):
            begin = i * 300
            end = begin + 300
            pred_instance_mask[begin:end] = i
            labels.append(gt_label)
            scores.append(.99)

        results_dict['pts_instance_mask'] = torch.tensor(pred_instance_mask)
        results_dict['instance_labels'] = torch.tensor(labels)
        results_dict['instance_scores'] = torch.tensor(scores)
        data_sample = Det3DDataSample()
        data_sample.pred_pts_seg = PointData(**results_dict)
        batch_data_samples = [data_sample]

        predictions = []
        for pred in batch_data_samples:
            if isinstance(pred, BaseDataElement):
                pred = pred.to_dict()
            predictions.append(pred)
        return predictions

    def test_evaluate(self):
        data_batch = self._demo_mm_inputs()
        predictions = self._demo_mm_model_output()
        # ScanNet-style class ids / names used by instance_seg_eval.
        valid_class_ids = (3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33,
                           34, 36, 39)
        class_labels = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
                        'window', 'bookshelf', 'picture', 'counter', 'desk',
                        'curtain', 'refrigerator', 'showercurtrain', 'toilet',
                        'sink', 'bathtub', 'garbagebin')
        dataset_meta = dict(
            VALID_CLASS_IDS=valid_class_ids, CLASSES=class_labels)
        instance_seg_metric = InstanceSegMetric()
        instance_seg_metric.dataset_meta = dataset_meta
        instance_seg_metric.process(data_batch, predictions)
        res = instance_seg_metric.evaluate(6)
        self.assertIsInstance(res, dict)
# Copyright (c) OpenMMLab. All rights reserved.
import unittest
import torch
from mmengine.data import BaseDataElement
from mmdet3d.core.data_structures import Det3DDataSample, PointData
from mmdet3d.metrics import SegMetric
class TestSegMetric(unittest.TestCase):
    """Unit tests for :class:`SegMetric`."""

    def _demo_mm_inputs(self):
        """Build a single-sample batch carrying ground-truth labels.

        255 entries mark ignored points (matches ``ignore_index`` below).
        """
        gt_mask = torch.Tensor([
            0, 0, 0, 255, 0, 0, 1, 1, 1, 255, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3,
            3, 255
        ])
        sample = Det3DDataSample()
        sample.gt_pts_seg = PointData(pts_semantic_mask=gt_mask)
        return [dict(data_sample=sample.to_dict())]

    def _demo_mm_model_output(self):
        """Build fake per-point semantic predictions for one sample."""
        pred_mask = torch.Tensor([
            0, 0, 1, 0, 0, 2, 1, 3, 1, 2, 1, 0, 2, 2, 2, 2, 1, 3, 0, 3, 3, 3, 3
        ])
        sample = Det3DDataSample()
        sample['pred_pts_seg'] = dict(pts_semantic_mask=pred_mask)
        outputs = []
        for item in [sample]:
            if isinstance(item, BaseDataElement):
                item = item.to_dict()
            outputs.append(item)
        return outputs

    def test_evaluate(self):
        batch = self._demo_mm_inputs()
        preds = self._demo_mm_model_output()
        # Minimal 4-class taxonomy; 255 is the ignored label.
        label2cat = {
            0: 'car',
            1: 'bicycle',
            2: 'motorcycle',
            3: 'truck',
        }
        metric = SegMetric()
        metric.dataset_meta = dict(label2cat=label2cat, ignore_index=255)
        metric.process(batch, preds)
        res = metric.evaluate(0)
        self.assertIsInstance(res, dict)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment