Commit d78f097c authored by VVsssssk, committed by ChaimZhu

[Refactor] Add nus metric

parent 9247e72c
# Copyright (c) OpenMMLab. All rights reserved.
from .indoor_metric import IndoorMetric # noqa: F401,F403
from .kitti_metric import KittiMetric # noqa: F401,F403
from .nuscenes_metric import NuScenesMetric # noqa: F401,F403
__all__ = ['KittiMetric', 'NuScenesMetric', 'IndoorMetric']
@@ -138,7 +138,7 @@ class KittiMetric(BaseMetric):
Returns:
list[dict]: List of annotations.
"""
# loading data from a pkl file
return mmcv.load(ann_file, file_format='pkl')
def process(self, data_batch: Sequence[dict],
......
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import tempfile
from os import path as osp
from typing import Dict, List, Optional, Sequence, Tuple, Union
import mmcv
import numpy as np
import pyquaternion
from mmengine.evaluator import BaseMetric
from mmengine.logging import MMLogger
from nuscenes.eval.detection.config import config_factory
from nuscenes.eval.detection.data_classes import DetectionConfig
from nuscenes.utils.data_classes import Box as NuScenesBox
from mmdet3d.registry import METRICS
@METRICS.register_module()
class NuScenesMetric(BaseMetric):
"""Nuscenes evaluation metric.
Args:
data_root (str): Path of dataset root.
ann_file (str): Path of annotation file.
metric (str | list[str]): Metrics to be evaluated.
    Defaults to 'bbox'.
modality (dict): Modality to specify the sensor data used
as input. Defaults to dict(use_camera=False, use_lidar=True).
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
jsonfile_prefix (str, optional): The prefix of json files including
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
eval_version (str): Configuration version of evaluation.
Defaults to 'detection_cvpr_2019'.
collect_device (str): Device name used for collecting results
from different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
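Example:
    A hypothetical evaluator config (the paths below are placeholders
    for an actual nuScenes setup)::

        val_evaluator = dict(
            type='NuScenesMetric',
            data_root='data/nuscenes',
            ann_file='data/nuscenes/nuscenes_infos_val.pkl',
            metric='bbox')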
"""
NameMapping = {
'movable_object.barrier': 'barrier',
'vehicle.bicycle': 'bicycle',
'vehicle.bus.bendy': 'bus',
'vehicle.bus.rigid': 'bus',
'vehicle.car': 'car',
'vehicle.construction': 'construction_vehicle',
'vehicle.motorcycle': 'motorcycle',
'human.pedestrian.adult': 'pedestrian',
'human.pedestrian.child': 'pedestrian',
'human.pedestrian.construction_worker': 'pedestrian',
'human.pedestrian.police_officer': 'pedestrian',
'movable_object.trafficcone': 'traffic_cone',
'vehicle.trailer': 'trailer',
'vehicle.truck': 'truck'
}
DefaultAttribute = {
'car': 'vehicle.parked',
'pedestrian': 'pedestrian.moving',
'trailer': 'vehicle.parked',
'truck': 'vehicle.parked',
'bus': 'vehicle.moving',
'motorcycle': 'cycle.without_rider',
'construction_vehicle': 'vehicle.parked',
'bicycle': 'cycle.without_rider',
'barrier': '',
'traffic_cone': '',
}
# https://github.com/nutonomy/nuscenes-devkit/blob/57889ff20678577025326cfc24e57424a829be0a/python-sdk/nuscenes/eval/detection/evaluate.py#L222 # noqa
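# Maps the devkit's TP error keys to the abbreviated names used as keys
# in the summarized metric dict (e.g. '<prefix>/mATE').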
ErrNameMapping = {
'trans_err': 'mATE',
'scale_err': 'mASE',
'orient_err': 'mAOE',
'vel_err': 'mAVE',
'attr_err': 'mAAE'
}
def __init__(self,
data_root: str,
ann_file: str,
metric: Union[str, List[str]] = 'bbox',
modality: Dict = dict(use_camera=False, use_lidar=True),
prefix: Optional[str] = None,
jsonfile_prefix: Optional[str] = None,
eval_version: str = 'detection_cvpr_2019',
collect_device: str = 'cpu') -> None:
self.default_prefix = 'NuScenes metric'
super(NuScenesMetric, self).__init__(
collect_device=collect_device, prefix=prefix)
if modality is None:
modality = dict(
use_camera=False,
use_lidar=True,
)
self.ann_file = ann_file
self.data_root = data_root
self.modality = modality
self.jsonfile_prefix = jsonfile_prefix
self.metrics = metric if isinstance(metric, list) else [metric]
self.eval_version = eval_version
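# official evaluation config for this protocol version
# (per-class detection ranges, matching distance thresholds, ...)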
self.eval_detection_configs = config_factory(self.eval_version)
def load_annotations(self, ann_file: str) -> list:
"""Load annotations from ann_file.
Args:
ann_file (str): Path of the annotation file.
Returns:
list[dict]: List of annotations.
"""
# loading data from a pkl file
return mmcv.load(ann_file, file_format='pkl')
def process(self, data_batch: Sequence[dict],
predictions: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions.
The processed results should be stored in ``self.results``,
which will be used to compute the metrics when all batches
have been processed.
Args:
data_batch (Sequence[dict]): A batch of data
from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model.
"""
assert len(data_batch) == len(predictions)
for data, pred in zip(data_batch, predictions):
result = dict()
for pred_result in pred:
if pred[pred_result] is not None:
for attr_name in pred[pred_result]:
pred[pred_result][attr_name] = pred[pred_result][
attr_name].to(self.collect_device)
result[pred_result] = pred[pred_result]
sample_idx = data['data_sample']['sample_idx']
result['sample_idx'] = sample_idx
self.results.append(result)
def compute_metrics(self, results: list) -> Dict[str, float]:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
Returns:
Dict[str, float]: The computed metrics. The keys are the names of
the metrics, and the values are corresponding results.
"""
logger: MMLogger = MMLogger.get_current_instance()
classes = self.dataset_meta['CLASSES']
self.version = self.dataset_meta['version']
# load annotations
self.data_infos = self.load_annotations(self.ann_file)['data_list']
result_dict, tmp_dir = self.format_results(results, classes,
self.jsonfile_prefix)
metric_dict = {}
for metric in self.metrics:
ap_dict = self.nus_evaluate(
result_dict, classes=classes, metric=metric, logger=logger)
for result in ap_dict:
metric_dict[result] = ap_dict[result]
if tmp_dir is not None:
tmp_dir.cleanup()
return metric_dict
def nus_evaluate(self,
result_dict: dict,
metric: str = 'bbox',
classes: Optional[List[str]] = None,
logger: Optional[logging.Logger] = None) -> dict:
"""Evaluation in Nuscenes protocol.
Args:
result_dict (dict): Formatted results of the dataset.
metric (str): Metric to be evaluated.
    Defaults to 'bbox'.
classes (list[str], optional): A list of class names. Defaults
    to None.
logger (logging.Logger, optional): Logger used for printing
    related information during evaluation. Defaults to None.
Returns:
dict[str, float]: Results of each evaluation metric.
"""
metric_dict = dict()
for name in result_dict:
print(f'Evaluating bboxes of {name}')
ret_dict = self._evaluate_single(
result_dict[name], classes=classes, result_name=name)
metric_dict.update(ret_dict)
return metric_dict
def _evaluate_single(self,
result_path: str,
classes: Optional[List[str]] = None,
result_name: str = 'pred_instances_3d') -> dict:
"""Evaluation for a single model in nuScenes protocol.
Args:
result_path (str): Path of the result file.
classes (list[str], optional): A list of class names. Defaults
    to None.
result_name (str): Result name in the metric prefix.
Default: 'pred_instances_3d'.
Returns:
dict: Dictionary of evaluation details.
"""
from nuscenes import NuScenes
from nuscenes.eval.detection.evaluate import NuScenesEval
output_dir = osp.dirname(result_path)
nusc = NuScenes(
version=self.version, dataroot=self.data_root, verbose=False)
eval_set_map = {
'v1.0-mini': 'mini_val',
'v1.0-trainval': 'val',
}
nusc_eval = NuScenesEval(
nusc,
config=self.eval_detection_configs,
result_path=result_path,
eval_set=eval_set_map[self.version],
output_dir=output_dir,
verbose=False)
nusc_eval.main(render_curves=False)
# record metrics
metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))
detail = dict()
metric_prefix = f'{result_name}_NuScenes'
for name in classes:
for k, v in metrics['label_aps'][name].items():
val = float('{:.4f}'.format(v))
detail[f'{metric_prefix}/{name}_AP_dist_{k}'] = val
for k, v in metrics['label_tp_errors'][name].items():
val = float('{:.4f}'.format(v))
detail[f'{metric_prefix}/{name}_{k}'] = val
for k, v in metrics['tp_errors'].items():
val = float('{:.4f}'.format(v))
detail[f'{metric_prefix}/{self.ErrNameMapping[k]}'] = val
detail[f'{metric_prefix}/NDS'] = metrics['nd_score']
detail[f'{metric_prefix}/mAP'] = metrics['mean_ap']
return detail
def format_results(self,
results: List[dict],
classes: Optional[List[str]] = None,
jsonfile_prefix: Optional[str] = None) -> Tuple:
"""Format the mmdet3d results to standard NuScenes json file.
Args:
results (list[dict]): Testing results of the dataset.
classes (list[str], optional): A list of class names. Defaults
    to None.
jsonfile_prefix (str, optional): The prefix of json files. It
includes the file path and the prefix of filename, e.g.,
"a/b/prefix". If not specified, a temp file will be created.
Default: None.
Returns:
tuple: Returns (result_dict, tmp_dir), where `result_dict` is a
dict containing the json filepaths, and `tmp_dir` is the temporary
directory created for saving json files when
`jsonfile_prefix` is not specified.
"""
assert isinstance(results, list), 'results must be a list'
if jsonfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
jsonfile_prefix = osp.join(tmp_dir.name, 'results')
else:
tmp_dir = None
result_dict = dict()
sample_id_list = [result['sample_idx'] for result in results]
for name in results[0]:
if 'pred' in name and '3d' in name and name[0] != '_':
# format result of model output in Det3dDataSample,
# including 'pred_instances_3d', 'pts_pred_instances_3d',
# and 'img_pred_instances_3d'
print(f'\nFormatting bboxes of {name}')
results_ = [out[name] for out in results]
tmp_file_ = osp.join(jsonfile_prefix, name)
result_dict[name] = self._format_bbox(results_, sample_id_list,
classes, tmp_file_)
return result_dict, tmp_dir
def _format_bbox(self,
results: List[dict],
sample_id_list: List[int],
classes: Optional[List[str]] = None,
jsonfile_prefix: Optional[str] = None) -> str:
"""Convert the results to the standard format.
Args:
results (list[dict]): Testing results of the dataset.
sample_id_list (list[int]): List of sample indices of the results.
classes (list[str], optional): A list of class names. Defaults
    to None.
jsonfile_prefix (str, optional): The prefix of the output jsonfile.
You can specify the output directory/filename by
modifying the jsonfile_prefix. Default: None.
Returns:
str: Path of the output json file.
"""
nusc_annos = {}
print('Start to convert detection format...')
for i, det in enumerate(mmcv.track_iter_progress(results)):
annos = []
boxes = output_to_nusc_box(det)
sample_id = sample_id_list[i]
sample_token = self.data_infos[sample_id]['token']
boxes = lidar_nusc_box_to_global(self.data_infos[sample_id], boxes,
classes,
self.eval_detection_configs)
for box in boxes:
name = classes[box.label]
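# assign an attribute: a box moving faster than 0.2 m/s gets a
# 'moving'/'with_rider' attribute; slower ones fall back to
# 'standing'/'stopped' or the per-class default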
if np.sqrt(box.velocity[0]**2 + box.velocity[1]**2) > 0.2:
if name in [
'car',
'construction_vehicle',
'bus',
'truck',
'trailer',
]:
attr = 'vehicle.moving'
elif name in ['bicycle', 'motorcycle']:
attr = 'cycle.with_rider'
else:
attr = self.DefaultAttribute[name]
else:
if name in ['pedestrian']:
attr = 'pedestrian.standing'
elif name in ['bus']:
attr = 'vehicle.stopped'
else:
attr = self.DefaultAttribute[name]
nusc_anno = dict(
sample_token=sample_token,
translation=box.center.tolist(),
size=box.wlh.tolist(),
rotation=box.orientation.elements.tolist(),
velocity=box.velocity[:2].tolist(),
detection_name=name,
detection_score=box.score,
attribute_name=attr)
annos.append(nusc_anno)
nusc_annos[sample_token] = annos
nusc_submissions = {
'meta': self.modality,
'results': nusc_annos,
}
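# the dumped file follows the official nuScenes submission format:
# {'meta': {...}, 'results': {sample_token: [annotation dicts], ...}}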
mmcv.mkdir_or_exist(jsonfile_prefix)
res_path = osp.join(jsonfile_prefix, 'results_nusc.json')
print('Results written to', res_path)
mmcv.dump(nusc_submissions, res_path)
return res_path
def output_to_nusc_box(detection: dict) -> List[NuScenesBox]:
"""Convert the output to the box class in the nuScenes.
Args:
detection (dict): Detection results.
- bboxes_3d (:obj:`BaseInstance3DBoxes`): Detection bbox.
- scores_3d (torch.Tensor): Detection scores.
- labels_3d (torch.Tensor): Predicted box labels.
Returns:
list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes.
"""
bbox3d = detection['bboxes_3d']
scores = detection['scores_3d'].numpy()
labels = detection['labels_3d'].numpy()
box_gravity_center = bbox3d.gravity_center.numpy()
box_dims = bbox3d.dims.numpy()
box_yaw = bbox3d.yaw.numpy()
# our LiDAR coordinate system -> nuScenes box coordinate system
nus_box_dims = box_dims[:, [1, 0, 2]]
box_list = []
for i in range(len(bbox3d)):
quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw[i])
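# the box tensor is assumed to store (vx, vy) in dims 7:9; nuScenes
# boxes expect a 3D velocity, so the z component is padded with 0.0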
velocity = (*bbox3d.tensor[i, 7:9], 0.0)
box = NuScenesBox(
box_gravity_center[i],
nus_box_dims[i],
quat,
label=labels[i],
score=scores[i],
velocity=velocity)
box_list.append(box)
return box_list
def lidar_nusc_box_to_global(
info: dict, boxes: List[NuScenesBox], classes: List[str],
eval_configs: DetectionConfig) -> List[NuScenesBox]:
"""Convert the box from ego to global coordinate.
Args:
info (dict): Info for a specific sample data, including the
calibration information.
boxes (list[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
classes (list[str]): Mapped classes in the evaluation.
eval_configs (:obj:`DetectionConfig`): Evaluation configuration.
Returns:
list[:obj:`NuScenesBox`]: List of standard NuScenesBoxes in the
    global coordinate system.
"""
box_list = []
for box in boxes:
# Move box to ego vehicle coord system
lidar2ego = np.array(info['lidar_points']['lidar2ego'])
box.rotate(
pyquaternion.Quaternion(matrix=lidar2ego, rtol=1e-05, atol=1e-07))
box.translate(lidar2ego[:3, 3])
# filter det in ego.
cls_range_map = eval_configs.class_range
radius = np.linalg.norm(box.center[:2], 2)
det_range = cls_range_map[classes[box.label]]
if radius > det_range:
continue
# Move box to global coord system
ego2global = np.array(info['ego2global'])
box.rotate(
pyquaternion.Quaternion(matrix=ego2global, rtol=1e-05, atol=1e-07))
box.translate(ego2global[:3, 3])
box_list.append(box)
return box_list
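# A minimal end-to-end sketch of the two helpers above (hypothetical `det`
# and `info` dicts; assumes `info` comes from an mmdet3d nuScenes info file
# and that `classes`/`eval_configs` match the evaluation setup):
#
#   boxes = output_to_nusc_box(det)  # det has bboxes_3d/scores_3d/labels_3d
#   boxes = lidar_nusc_box_to_global(info, boxes, classes, eval_configs)
#   # boxes are now NuScenesBox instances in global coordinates,
#   # range-filtered per class and ready to serialize for submission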