Unverified Commit 0c22c625 authored by Xiang Xu's avatar Xiang Xu Committed by GitHub
Browse files

[Enhancement] Support format_only for LyftDataset. (#2333)

parent cf6f4732
# Copyright (c) OpenMMLab. All rights reserved.
from collections import OrderedDict
from typing import Dict, List, Optional, Sequence
from typing import Dict, List, Optional, Sequence, Union
import numpy as np
from mmdet.evaluation import eval_map
......@@ -17,37 +17,34 @@ class IndoorMetric(BaseMetric):
"""Indoor scene evaluation metric.
Args:
iou_thr (list[float]): List of iou threshold when calculate the
metric. Defaults to [0.25, 0.5].
collect_device (str, optional): Device name used for collecting
results from different ranks during distributed training.
Must be 'cpu' or 'gpu'. Defaults to 'cpu'.
prefix (str): The prefix that will be added in the metric
iou_thr (float or List[float]): List of iou threshold when calculate
the metric. Defaults to [0.25, 0.5].
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None
If prefix is not provided in the argument, self.default_prefix will
be used instead. Defaults to None.
"""
def __init__(self,
iou_thr: List[float] = [0.25, 0.5],
collect_device: str = 'cpu',
prefix: Optional[str] = None,
**kwargs):
prefix: Optional[str] = None) -> None:
super(IndoorMetric, self).__init__(
prefix=prefix, collect_device=collect_device)
self.iou_thr = iou_thr
self.iou_thr = [iou_thr] if isinstance(iou_thr, float) else iou_thr
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions.
The processed results should be stored in ``self.results``,
which will be used to compute the metrics when all batches
have been processed.
The processed results should be stored in ``self.results``, which will
be used to compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from
the model.
data_samples (Sequence[dict]): A batch of outputs from the model.
"""
for data_sample in data_samples:
pred_3d = data_sample['pred_instances_3d']
......@@ -98,37 +95,34 @@ class Indoor2DMetric(BaseMetric):
"""indoor 2d predictions evaluation metric.
Args:
iou_thr (list[float]): List of iou threshold when calculate the
metric. Defaults to [0.5].
collect_device (str, optional): Device name used for collecting
results from different ranks during distributed training.
Must be 'cpu' or 'gpu'. Defaults to 'cpu'.
prefix (str): The prefix that will be added in the metric
iou_thr (float or List[float]): List of iou threshold when calculate
the metric. Defaults to [0.5].
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None
If prefix is not provided in the argument, self.default_prefix will
be used instead. Defaults to None.
"""
def __init__(self,
iou_thr: List[float] = [0.5],
iou_thr: Union[float, List[float]] = [0.5],
collect_device: str = 'cpu',
prefix: Optional[str] = None,
**kwargs):
prefix: Optional[str] = None):
super(Indoor2DMetric, self).__init__(
prefix=prefix, collect_device=collect_device)
self.iou_thr = iou_thr
self.iou_thr = [iou_thr] if isinstance(iou_thr, float) else iou_thr
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions.
The processed results should be stored in ``self.results``,
which will be used to compute the metrics when all batches
have been processed.
The processed results should be stored in ``self.results``, which will
be used to compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
predictions (Sequence[dict]): A batch of outputs from
the model.
data_samples (Sequence[dict]): A batch of outputs from the model.
"""
for data_sample in data_samples:
pred = data_sample['pred_instances']
......@@ -163,9 +157,7 @@ class Indoor2DMetric(BaseMetric):
logger: MMLogger = MMLogger.get_current_instance()
annotations, preds = zip(*results)
eval_results = OrderedDict()
iou_thr_2d = (self.iou_thr) if isinstance(self.iou_thr,
float) else self.iou_thr
for iou_thr_2d_single in iou_thr_2d:
for iou_thr_2d_single in self.iou_thr:
mean_ap, _ = eval_map(
preds,
annotations,
......
......@@ -13,33 +13,30 @@ class InstanceSegMetric(BaseMetric):
"""3D instance segmentation evaluation metric.
Args:
collect_device (str, optional): Device name used for collecting
results from different ranks during distributed training.
Must be 'cpu' or 'gpu'. Defaults to 'cpu'.
prefix (str): The prefix that will be added in the metric
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Default: None
If prefix is not provided in the argument, self.default_prefix will
be used instead. Defaults to None.
"""
def __init__(self,
collect_device: str = 'cpu',
prefix: Optional[str] = None,
**kwargs):
prefix: Optional[str] = None):
super(InstanceSegMetric, self).__init__(
prefix=prefix, collect_device=collect_device)
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions.
The processed results should be stored in ``self.results``,
which will be used to compute the metrics when all batches
have been processed.
The processed results should be stored in ``self.results``, which will
be used to compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from
the model.
data_samples (Sequence[dict]): A batch of outputs from the model.
"""
for data_sample in data_samples:
pred_3d = data_sample['pred_pts_seg']
......
......@@ -22,30 +22,28 @@ class KittiMetric(BaseMetric):
Args:
ann_file (str): Annotation file path.
metric (str or List[str]): Metrics to be evaluated.
Defaults to 'bbox'.
pcd_limit_range (List[float]): The range of point cloud used to
filter invalid predicted boxes.
Defaults to [0, -40, -3, 70.4, 40, 0.0].
metric (str or List[str]): Metrics to be evaluated. Defaults to 'bbox'.
pcd_limit_range (List[float]): The range of point cloud used to filter
invalid predicted boxes. Defaults to [0, -40, -3, 70.4, 40, 0.0].
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
pklfile_prefix (str, optional): The prefix of pkl files, including
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Defaults to None.
If prefix is not provided in the argument, self.default_prefix will
be used instead. Defaults to None.
pklfile_prefix (str, optional): The prefix of pkl files, including the
file path and the prefix of filename, e.g., "a/b/prefix". If not
specified, a temp file will be created. Defaults to None.
default_cam_key (str): The default camera for lidar to camera
conversion. By default, KITTI: 'CAM2', Waymo: 'CAM_FRONT'.
Defaults to 'CAM2'
Defaults to 'CAM2'.
format_only (bool): Format the output results without perform
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server.
evaluation. It is useful when you want to format the result to a
specific format and submit it to the test server.
Defaults to False.
submission_prefix (str, optional): The prefix of submission data.
If not specified, the submission data will not be generated.
submission_prefix (str, optional): The prefix of submission data. If
not specified, the submission data will not be generated.
Defaults to None.
collect_device (str): Device name used for collecting results
from different ranks during distributed training. Must be 'cpu' or
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
......@@ -147,14 +145,12 @@ class KittiMetric(BaseMetric):
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions.
The processed results should be stored in ``self.results``,
which will be used to compute the metrics when all batches
have been processed.
The processed results should be stored in ``self.results``, which will
be used to compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from
the model.
data_samples (Sequence[dict]): A batch of outputs from the model.
"""
for data_sample in data_samples:
......@@ -196,8 +192,8 @@ class KittiMetric(BaseMetric):
metric_dict = {}
if self.format_only:
logger.info('results are saved in '
f'{osp.dirname(self.submission_prefix)}')
logger.info(
f'results are saved in {osp.dirname(self.submission_prefix)}')
return metric_dict
gt_annos = [
......@@ -230,12 +226,11 @@ class KittiMetric(BaseMetric):
Args:
results_dict (dict): Formatted results of the dataset.
gt_annos (List[dict]): Contain gt information of each sample.
metric (str, optional): Metrics to be evaluated.
Defaults to None.
metric (str, optional): Metrics to be evaluated. Defaults to None.
classes (List[str], optional): A list of class name.
Defaults to None.
logger (MMLogger, optional): Logger used for printing
related information during evaluation. Defaults to None.
logger (MMLogger, optional): Logger used for printing related
information during evaluation. Defaults to None.
Returns:
Dict[str, float]: Results of each evaluation metric.
......@@ -278,9 +273,9 @@ class KittiMetric(BaseMetric):
Defaults to None.
Returns:
tuple: (result_dict, tmp_dir), result_dict is a dict containing
the formatted result, tmp_dir is the temporal directory created
for saving json files when jsonfile_prefix is not specified.
tuple: (result_dict, tmp_dir), result_dict is a dict containing the
formatted result, tmp_dir is the temporal directory created for
saving json files when jsonfile_prefix is not specified.
"""
if pklfile_prefix is None:
tmp_dir = tempfile.TemporaryDirectory()
......@@ -326,8 +321,8 @@ class KittiMetric(BaseMetric):
submission.
Args:
net_outputs (List[dict]): List of dict storing the
inferenced bounding boxes and scores.
net_outputs (List[dict]): List of dict storing the inferenced
bounding boxes and scores.
sample_idx_list (List[int]): List of input sample idx.
class_names (List[str]): A list of class names.
pklfile_prefix (str, optional): The prefix of pkl file.
......@@ -453,8 +448,8 @@ class KittiMetric(BaseMetric):
submission.
Args:
net_outputs (List[dict]): List of dict storing the
inferenced bounding boxes and scores.
net_outputs (List[dict]): List of dict storing the inferenced
bounding boxes and scores.
sample_idx_list (List[int]): List of input sample idx.
class_names (List[str]): A list of class names.
pklfile_prefix (str, optional): The prefix of pkl file.
......@@ -571,14 +566,14 @@ class KittiMetric(BaseMetric):
Returns:
dict: Valid predicted boxes.
- bbox (np.ndarray): 2D bounding boxes.
- box3d_camera (np.ndarray): 3D bounding boxes in
camera coordinate.
- box3d_lidar (np.ndarray): 3D bounding boxes in
LiDAR coordinate.
- scores (np.ndarray): Scores of boxes.
- label_preds (np.ndarray): Class label predictions.
- sample_idx (int): Sample index.
- bbox (np.ndarray): 2D bounding boxes.
- box3d_camera (np.ndarray): 3D bounding boxes in
camera coordinate.
- box3d_lidar (np.ndarray): 3D bounding boxes in
LiDAR coordinate.
- scores (np.ndarray): Scores of boxes.
- label_preds (np.ndarray): Class label predictions.
- sample_idx (int): Sample index.
"""
# TODO: refactor this function
box_preds = box_dict['bboxes_3d']
......
# Copyright (c) OpenMMLab. All rights reserved.
import logging
import os
import tempfile
from os import path as osp
......@@ -26,24 +25,29 @@ class LyftMetric(BaseMetric):
Args:
data_root (str): Path of dataset root.
ann_file (str): Path of annotation file.
metric (str | list[str]): Metrics to be evaluated.
Default to 'bbox'.
modality (dict): Modality to specify the sensor data used
as input. Defaults to dict(use_camera=False, use_lidar=True).
metric (str or List[str]): Metrics to be evaluated. Defaults to 'bbox'.
modality (dict): Modality to specify the sensor data used as input.
Defaults to dict(use_camera=False, use_lidar=True).
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
jsonfile_prefix (str, optional): The prefix of json files including
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Default: None.
csv_savepath (str, optional): The path for saving csv files.
It includes the file path and the csv filename,
e.g., "a/b/filename.csv". If not specified,
the result will not be converted to csv file.
collect_device (str): Device name used for collecting results
from different ranks during distributed training. Must be 'cpu' or
If prefix is not provided in the argument, self.default_prefix will
be used instead. Defaults to None.
jsonfile_prefix (str, optional): The prefix of json files including the
file path and the prefix of filename, e.g., "a/b/prefix". If not
specified, a temp file will be created. Defaults to None.
format_only (bool): Format the output results without perform
evaluation. It is useful when you want to format the result to a
specific format and submit it to the test server.
Defaults to False.
csv_savepath (str, optional): The path for saving csv files. It
includes the file path and the csv filename, e.g.,
"a/b/filename.csv". If not specified, the result will not be
converted to csv file. Defaults to None.
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
"""
def __init__(self,
......@@ -56,6 +60,7 @@ class LyftMetric(BaseMetric):
),
prefix: Optional[str] = None,
jsonfile_prefix: str = None,
format_only: bool = False,
csv_savepath: str = None,
collect_device: str = 'cpu',
backend_args: Optional[dict] = None) -> None:
......@@ -66,6 +71,12 @@ class LyftMetric(BaseMetric):
self.data_root = data_root
self.modality = modality
self.jsonfile_prefix = jsonfile_prefix
self.format_only = format_only
if self.format_only:
assert csv_savepath is not None, 'csv_savepath must be not None '
'when format_only is True, otherwise the result files will be '
'saved to a temp directory which will be cleaned up at the end.'
self.backend_args = backend_args
self.csv_savepath = csv_savepath
self.metrics = metric if isinstance(metric, list) else [metric]
......@@ -73,14 +84,12 @@ class LyftMetric(BaseMetric):
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and data_samples.
The processed results should be stored in ``self.results``,
which will be used to compute the metrics when all batches
have been processed.
The processed results should be stored in ``self.results``, which will
be used to compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from
the model.
data_samples (Sequence[dict]): A batch of outputs from the model.
"""
for data_sample in data_samples:
result = dict()
......@@ -94,13 +103,13 @@ class LyftMetric(BaseMetric):
result['pred_instances'] = pred_2d
sample_idx = data_sample['sample_idx']
result['sample_idx'] = sample_idx
self.results.append(result)
self.results.append(result)
def compute_metrics(self, results: list) -> Dict[str, float]:
def compute_metrics(self, results: List[dict]) -> Dict[str, float]:
"""Compute the metrics from processed results.
Args:
results (list): The processed results of each batch.
results (List[dict]): The processed results of the whole dataset.
Returns:
Dict[str, float]: The computed metrics. The keys are the names of
......@@ -110,14 +119,21 @@ class LyftMetric(BaseMetric):
classes = self.dataset_meta['classes']
self.version = self.dataset_meta['version']
# load annotations
# load annotations
self.data_infos = load(
self.ann_file, backend_args=self.backend_args)['data_list']
result_dict, tmp_dir = self.format_results(results, classes,
self.jsonfile_prefix)
self.jsonfile_prefix,
self.csv_savepath)
metric_dict = {}
if self.format_only:
logger.info(
f'results are saved in {osp.dirname(self.csv_savepath)}')
return metric_dict
for metric in self.metrics:
ap_dict = self.lyft_evaluate(
result_dict, metric=metric, logger=logger)
......@@ -128,31 +144,33 @@ class LyftMetric(BaseMetric):
tmp_dir.cleanup()
return metric_dict
def format_results(self,
results: List[dict],
classes: List[str] = None,
jsonfile_prefix: str = None,
csv_savepath: str = None) -> Tuple:
def format_results(
self,
results: List[dict],
classes: Optional[List[str]] = None,
jsonfile_prefix: Optional[str] = None,
csv_savepath: Optional[str] = None
) -> Tuple[dict, Union[tempfile.TemporaryDirectory, None]]:
"""Format the results to json (standard format for COCO evaluation).
Args:
results (list[dict]): Testing results of the dataset.
classes (list[String], optional): A list of class name. Defaults
to None.
results (List[dict]): Testing results of the dataset.
classes (List[str], optional): A list of class name.
Defaults to None.
jsonfile_prefix (str, optional): The prefix of json files. It
includes the file path and the prefix of filename, e.g.,
"a/b/prefix". If not specified, a temp file will be created.
Default: None.
csv_savepath (str, optional): The path for saving csv files.
It includes the file path and the csv filename,
e.g., "a/b/filename.csv". If not specified,
the result will not be converted to csv file.
Defaults to None.
csv_savepath (str, optional): The path for saving csv files. It
includes the file path and the csv filename, e.g.,
"a/b/filename.csv". If not specified, the result will not be
converted to csv file. Defaults to None.
Returns:
tuple: Returns (result_dict, tmp_dir), where `result_dict` is a
dict containing the json filepaths, `tmp_dir` is the temporal
directory created for saving json files when
`jsonfile_prefix` is not specified.
tuple: Returns (result_dict, tmp_dir), where ``result_dict`` is a
dict containing the json filepaths, ``tmp_dir`` is the temporal
directory created for saving json files when ``jsonfile_prefix`` is
not specified.
"""
assert isinstance(results, list), 'results must be a list'
......@@ -162,7 +180,7 @@ class LyftMetric(BaseMetric):
else:
tmp_dir = None
result_dict = dict()
sample_id_list = [result['sample_idx'] for result in results]
sample_idx_list = [result['sample_idx'] for result in results]
for name in results[0]:
if 'pred' in name and '3d' in name and name[0] != '_':
......@@ -172,8 +190,9 @@ class LyftMetric(BaseMetric):
# 'img_pred_instances_3d'
results_ = [out[name] for out in results]
tmp_file_ = osp.join(jsonfile_prefix, name)
result_dict[name] = self._format_bbox(results_, sample_id_list,
classes, tmp_file_)
result_dict[name] = self._format_bbox(results_,
sample_idx_list, classes,
tmp_file_)
if csv_savepath is not None:
if 'pred_instances_3d' in result_dict:
self.json2csv(result_dict['pred_instances_3d'], csv_savepath)
......@@ -221,19 +240,19 @@ class LyftMetric(BaseMetric):
def _format_bbox(self,
results: List[dict],
sample_id_list: List[int],
classes: List[str] = None,
jsonfile_prefix: str = None) -> str:
sample_idx_list: List[int],
classes: Optional[List[str]] = None,
jsonfile_prefix: Optional[str] = None) -> str:
"""Convert the results to the standard format.
Args:
results (list[dict]): Testing results of the dataset.
sample_id_list (list[int]): List of result sample id.
classes (list[String], optional): A list of class name. Defaults
to None.
results (List[dict]): Testing results of the dataset.
sample_idx_list (List[int]): List of result sample idx.
classes (List[str], optional): A list of class name.
Defaults to None.
jsonfile_prefix (str, optional): The prefix of the output jsonfile.
You can specify the output directory/filename by
modifying the jsonfile_prefix. Default: None.
You can specify the output directory/filename by modifying the
jsonfile_prefix. Defaults to None.
Returns:
str: Path of the output json file.
......@@ -244,9 +263,10 @@ class LyftMetric(BaseMetric):
for i, det in enumerate(mmengine.track_iter_progress(results)):
annos = []
boxes = output_to_lyft_box(det)
sample_id = sample_id_list[i]
sample_token = self.data_infos[sample_id]['token']
boxes = lidar_lyft_box_to_global(self.data_infos[sample_id], boxes)
sample_idx = sample_idx_list[i]
sample_token = self.data_infos[sample_idx]['token']
boxes = lidar_lyft_box_to_global(self.data_infos[sample_idx],
boxes)
for i, box in enumerate(boxes):
name = classes[box.label]
lyft_anno = dict(
......@@ -272,27 +292,24 @@ class LyftMetric(BaseMetric):
def lyft_evaluate(self,
result_dict: dict,
metric: str = 'bbox',
logger: logging.Logger = None) -> dict:
logger: Optional[MMLogger] = None) -> Dict[str, float]:
"""Evaluation in Lyft protocol.
Args:
result_dict (dict): Formatted results of the dataset.
metric (str): Metrics to be evaluated.
Default: 'bbox'.
classes (list[String], optional): A list of class name. Defaults
to None.
logger (MMLogger, optional): Logger used for printing
related information during evaluation. Default: None.
metric (str): Metrics to be evaluated. Defaults to 'bbox'.
logger (MMLogger, optional): Logger used for printing related
information during evaluation. Defaults to None.
Returns:
dict[str, float]: Evaluation results.
Dict[str, float]: Evaluation results.
"""
metric_dict = dict()
for name in result_dict:
print('Evaluating bboxes of {}'.format(name))
print(f'Evaluating bboxes of {name}')
ret_dict = self._evaluate_single(
result_dict[name], logger=logger, result_name=name)
metric_dict.update(ret_dict)
metric_dict.update(ret_dict)
return metric_dict
def _evaluate_single(self,
......@@ -303,15 +320,13 @@ class LyftMetric(BaseMetric):
Args:
result_path (str): Path of the result file.
logger (logging.Logger | str, optional): Logger used for printing
related information during evaluation. Default: None.
metric (str): Metric name used for evaluation.
Default: 'bbox'.
logger (MMLogger, optional): Logger used for printing related
information during evaluation. Defaults to None.
result_name (str): Result name in the metric prefix.
Default: 'pts_bbox'.
Defaults to 'pts_bbox'.
Returns:
dict: Dictionary of evaluation details.
Dict[str, float]: Dictionary of evaluation details.
"""
output_dir = osp.join(*osp.split(result_path)[:-1])
lyft = Lyft(
......@@ -343,7 +358,7 @@ def output_to_lyft_box(detection: dict) -> List[LyftBox]:
detection (dict): Detection results.
Returns:
list[:obj:`LyftBox`]: List of standard LyftBoxes.
List[:obj:`LyftBox`]: List of standard LyftBoxes.
"""
bbox3d = detection['bbox_3d']
scores = detection['scores_3d'].numpy()
......@@ -374,13 +389,13 @@ def lidar_lyft_box_to_global(info: dict,
"""Convert the box from ego to global coordinate.
Args:
info (dict): Info for a specific sample data, including the
calibration information.
boxes (list[:obj:`LyftBox`]): List of predicted LyftBoxes.
info (dict): Info for a specific sample data, including the calibration
information.
boxes (List[:obj:`LyftBox`]): List of predicted LyftBoxes.
Returns:
list: List of standard LyftBoxes in the global
coordinate.
List[:obj:`LyftBox`]: List of standard LyftBoxes in the global
coordinate.
"""
box_list = []
for box in boxes:
......
......@@ -27,25 +27,24 @@ class NuScenesMetric(BaseMetric):
Args:
data_root (str): Path of dataset root.
ann_file (str): Path of annotation file.
metric (str or List[str]): Metrics to be evaluated.
Defaults to 'bbox'.
modality (dict): Modality to specify the sensor data used
as input. Defaults to dict(use_camera=False, use_lidar=True).
metric (str or List[str]): Metrics to be evaluated. Defaults to 'bbox'.
modality (dict): Modality to specify the sensor data used as input.
Defaults to dict(use_camera=False, use_lidar=True).
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
If prefix is not provided in the argument, self.default_prefix will
be used instead. Defaults to None.
format_only (bool): Format the output results without perform
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server.
evaluation. It is useful when you want to format the result to a
specific format and submit it to the test server.
Defaults to False.
jsonfile_prefix (str, optional): The prefix of json files including
the file path and the prefix of filename, e.g., "a/b/prefix".
jsonfile_prefix (str, optional): The prefix of json files including the
file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Defaults to None.
eval_version (str): Configuration version of evaluation.
Defaults to 'detection_cvpr_2019'.
collect_device (str): Device name used for collecting results
from different ranks during distributed training. Must be 'cpu' or
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
......@@ -111,10 +110,9 @@ class NuScenesMetric(BaseMetric):
self.modality = modality
self.format_only = format_only
if self.format_only:
assert jsonfile_prefix is not None, 'jsonfile_prefix must be '
'not None when format_only is True, otherwise the result files '
'will be saved to a temp directory which will be cleanup at '
'the end.'
assert jsonfile_prefix is not None, 'jsonfile_prefix must be not '
'None when format_only is True, otherwise the result files will '
'be saved to a temp directory which will be cleanup at the end.'
self.jsonfile_prefix = jsonfile_prefix
self.backend_args = backend_args
......@@ -127,14 +125,12 @@ class NuScenesMetric(BaseMetric):
def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
"""Process one batch of data samples and predictions.
The processed results should be stored in ``self.results``,
which will be used to compute the metrics when all batches
have been processed.
The processed results should be stored in ``self.results``, which will
be used to compute the metrics when all batches have been processed.
Args:
data_batch (dict): A batch of data from the dataloader.
data_samples (Sequence[dict]): A batch of outputs from
the model.
data_samples (Sequence[dict]): A batch of outputs from the model.
"""
for data_sample in data_samples:
result = dict()
......@@ -173,8 +169,8 @@ class NuScenesMetric(BaseMetric):
metric_dict = {}
if self.format_only:
logger.info('results are saved in '
f'{osp.basename(self.jsonfile_prefix)}')
logger.info(
f'results are saved in {osp.basename(self.jsonfile_prefix)}')
return metric_dict
for metric in self.metrics:
......@@ -199,8 +195,8 @@ class NuScenesMetric(BaseMetric):
metric (str): Metrics to be evaluated. Defaults to 'bbox'.
classes (List[str], optional): A list of class name.
Defaults to None.
logger (MMLogger, optional): Logger used for printing
related information during evaluation. Defaults to None.
logger (MMLogger, optional): Logger used for printing related
information during evaluation. Defaults to None.
Returns:
Dict[str, float]: Results of each evaluation metric.
......@@ -210,7 +206,7 @@ class NuScenesMetric(BaseMetric):
print(f'Evaluating bboxes of {name}')
ret_dict = self._evaluate_single(
result_dict[name], classes=classes, result_name=name)
metric_dict.update(ret_dict)
metric_dict.update(ret_dict)
return metric_dict
def _evaluate_single(
......@@ -286,10 +282,10 @@ class NuScenesMetric(BaseMetric):
Defaults to None.
Returns:
tuple: Returns (result_dict, tmp_dir), where `result_dict` is a
dict containing the json filepaths, `tmp_dir` is the temporal
directory created for saving json files when
`jsonfile_prefix` is not specified.
tuple: Returns (result_dict, tmp_dir), where ``result_dict`` is a
dict containing the json filepaths, ``tmp_dir`` is the temporary
directory created for saving json files when ``jsonfile_prefix`` is
not specified.
"""
assert isinstance(results, list), 'results must be a list'
......@@ -320,9 +316,9 @@ class NuScenesMetric(BaseMetric):
"""Get attribute from predicted index.
This is a workaround to predict attribute when the predicted velocity
is not reliable. We map the predicted attribute index to the one
in the attribute set. If it is consistent with the category, we will
keep it. Otherwise, we will use the default attribute.
is not reliable. We map the predicted attribute index to the one in the
attribute set. If it is consistent with the category, we will keep it.
Otherwise, we will use the default attribute.
Args:
attr_idx (int): Attribute index.
......@@ -376,8 +372,8 @@ class NuScenesMetric(BaseMetric):
classes (List[str], optional): A list of class name.
Defaults to None.
jsonfile_prefix (str, optional): The prefix of the output jsonfile.
You can specify the output directory/filename by
modifying the jsonfile_prefix. Defaults to None.
You can specify the output directory/filename by modifying the
jsonfile_prefix. Defaults to None.
Returns:
str: Path of the output json file.
......@@ -499,8 +495,8 @@ class NuScenesMetric(BaseMetric):
classes (List[str], optional): A list of class name.
Defaults to None.
jsonfile_prefix (str, optional): The prefix of the output jsonfile.
You can specify the output directory/filename by
modifying the jsonfile_prefix. Defaults to None.
You can specify the output directory/filename by modifying the
jsonfile_prefix. Defaults to None.
Returns:
str: Path of the output json file.
......@@ -573,8 +569,8 @@ def output_to_nusc_box(
- labels_3d (torch.Tensor): Predicted box labels.
Returns:
Tuple[List[:obj:`NuScenesBox`], np.ndarray or None]:
List of standard NuScenesBoxes and attribute labels.
Tuple[List[:obj:`NuScenesBox`], np.ndarray or None]: List of standard
NuScenesBoxes and attribute labels.
"""
bbox3d = detection['bboxes_3d']
scores = detection['scores_3d'].numpy()
......@@ -640,8 +636,8 @@ def lidar_nusc_box_to_global(
"""Convert the box from ego to global coordinate.
Args:
info (dict): Info for a specific sample data, including the
calibration information.
info (dict): Info for a specific sample data, including the calibration
information.
boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
classes (List[str]): Mapped classes in the evaluation.
eval_configs (:obj:`DetectionConfig`): Evaluation configuration object.
......@@ -683,8 +679,8 @@ def cam_nusc_box_to_global(
"""Convert the box from camera to global coordinate.
Args:
info (dict): Info for a specific sample data, including the
calibration information.
info (dict): Info for a specific sample data, including the calibration
information.
boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
attrs (np.ndarray): Predicted attributes.
classes (List[str]): Mapped classes in the evaluation.
......@@ -692,9 +688,8 @@ def cam_nusc_box_to_global(
camera_type (str): Type of camera. Defaults to 'CAM_FRONT'.
Returns:
Tuple[List[:obj:`NuScenesBox`], List[int]]:
List of standard NuScenesBoxes in the global coordinate and
attribute label.
Tuple[List[:obj:`NuScenesBox`], List[int]]: List of standard
NuScenesBoxes in the global coordinate and attribute label.
"""
box_list = []
attr_list = []
......@@ -726,15 +721,15 @@ def global_nusc_box_to_cam(info: dict, boxes: List[NuScenesBox],
"""Convert the box from global to camera coordinate.
Args:
info (dict): Info for a specific sample data, including the
calibration information.
info (dict): Info for a specific sample data, including the calibration
information.
boxes (List[:obj:`NuScenesBox`]): List of predicted NuScenesBoxes.
classes (List[str]): Mapped classes in the evaluation.
eval_configs (:obj:`DetectionConfig`): Evaluation configuration object.
Returns:
List[:obj:`NuScenesBox`]: List of standard NuScenesBoxes in
camera coordinate.
List[:obj:`NuScenesBox`]: List of standard NuScenesBoxes in camera
coordinate.
"""
box_list = []
for box in boxes:
......
......@@ -24,30 +24,28 @@ class WaymoMetric(KittiMetric):
Args:
ann_file (str): The path of the annotation file in kitti format.
waymo_bin_file (str): The path of the annotation file in waymo format.
data_root (str): Path of dataset root.
Used for storing waymo evaluation programs.
data_root (str): Path of dataset root. Used for storing waymo
evaluation programs.
split (str): The split of the evaluation set. Defaults to 'training'.
metric (str or List[str]): Metrics to be evaluated.
Defaults to 'mAP'.
pcd_limit_range (List[float]): The range of point cloud used to
filter invalid predicted boxes.
Defaults to [-85, -85, -5, 85, 85, 5].
convert_kitti_format (bool): Whether to convert the results to
kitti format. Now, in order to be compatible with camera-based
methods, defaults to True.
metric (str or List[str]): Metrics to be evaluated. Defaults to 'mAP'.
pcd_limit_range (List[float]): The range of point cloud used to filter
invalid predicted boxes. Defaults to [-85, -85, -5, 85, 85, 5].
convert_kitti_format (bool): Whether to convert the results to kitti
format. Now, in order to be compatible with camera-based methods,
defaults to True.
prefix (str, optional): The prefix that will be added in the metric
names to disambiguate homonymous metrics of different evaluators.
If prefix is not provided in the argument, self.default_prefix
will be used instead. Defaults to None.
If prefix is not provided in the argument, self.default_prefix will
be used instead. Defaults to None.
format_only (bool): Format the output results without performing
evaluation. It is useful when you want to format the result
to a specific format and submit it to the test server.
evaluation. It is useful when you want to format the result to a
specific format and submit it to the test server.
Defaults to False.
pklfile_prefix (str, optional): The prefix of pkl files, including
the file path and the prefix of filename, e.g., "a/b/prefix".
If not specified, a temp file will be created. Defaults to None.
submission_prefix (str, optional): The prefix of submission data.
If not specified, the submission data will not be generated.
pklfile_prefix (str, optional): The prefix of pkl files, including the
file path and the prefix of filename, e.g., "a/b/prefix". If not
specified, a temp file will be created. Defaults to None.
submission_prefix (str, optional): The prefix of submission data. If
not specified, the submission data will not be generated.
Defaults to None.
load_type (str): Type of loading mode during training.
......@@ -55,19 +53,19 @@ class WaymoMetric(KittiMetric):
- 'mv_image_based': Load all of the instances in the frame and need
to convert to the FOV-based data type to support image-based
detector.
- 'fov_image_based': Only load the instances inside the default
cam, and need to convert to the FOV-based data type to support
image-based detector.
- 'fov_image_based': Only load the instances inside the default cam
and need to convert to the FOV-based data type to support
image-based detector.
default_cam_key (str): The default camera for lidar to camera
conversion. By default, KITTI: 'CAM2', Waymo: 'CAM_FRONT'.
Defaults to 'CAM_FRONT'.
use_pred_sample_idx (bool): In formating results, use the
sample index from the prediction or from the load annotations.
By default, KITTI: True, Waymo: False, Waymo has a conversion
process, which needs to use the sample idx from load annotation.
use_pred_sample_idx (bool): In formatting results, use the sample index
from the prediction or from the load annotations. By default,
KITTI: True, Waymo: False, Waymo has a conversion process, which
needs to use the sample idx from load annotation.
Defaults to False.
collect_device (str): Device name used for collecting results
from different ranks during distributed training. Must be 'cpu' or
collect_device (str): Device name used for collecting results from
different ranks during distributed training. Must be 'cpu' or
'gpu'. Defaults to 'cpu'.
backend_args (dict, optional): Arguments to instantiate the
corresponding backend. Defaults to None.
......@@ -120,10 +118,9 @@ class WaymoMetric(KittiMetric):
backend_args=backend_args)
self.format_only = format_only
if self.format_only:
assert pklfile_prefix is not None, 'pklfile_prefix must be '
'not None when format_only is True, otherwise the result files '
'will be saved to a temp directory which will be cleaned up at '
'the end.'
assert pklfile_prefix is not None, 'pklfile_prefix must not be '
'None when format_only is True, otherwise the result files will '
'be saved to a temp directory which will be cleaned up at the end.'
self.default_prefix = 'Waymo metric'
......@@ -216,8 +213,8 @@ class WaymoMetric(KittiMetric):
pklfile_prefix (str): The location that stored the prediction
results.
metric (str, optional): Metric to be evaluated. Defaults to None.
logger (MMLogger, optional): Logger used for printing
related information during evaluation. Defaults to None.
logger (MMLogger, optional): Logger used for printing related
information during evaluation. Defaults to None.
Returns:
Dict[str, float]: Results of each evaluation metric.
......@@ -348,9 +345,9 @@ class WaymoMetric(KittiMetric):
Defaults to None.
Returns:
tuple: (result_dict, tmp_dir), result_dict is a dict containing
the formatted result, tmp_dir is the temporal directory created
for saving json files when jsonfile_prefix is not specified.
tuple: (result_dict, tmp_dir), result_dict is a dict containing the
formatted result, tmp_dir is the temporary directory created for
saving json files when jsonfile_prefix is not specified.
"""
waymo_save_tmp_dir = tempfile.TemporaryDirectory()
waymo_results_save_dir = waymo_save_tmp_dir.name
......@@ -401,8 +398,8 @@ class WaymoMetric(KittiMetric):
"""Merge bounding boxes predicted from multi-view images.
Args:
box_dict_per_frame (List[dict]): The results of prediction
for each camera.
box_dict_per_frame (List[dict]): The results of prediction for each
camera.
cam0_info (dict): Store the sample idx for the given frame.
Returns:
......@@ -475,8 +472,8 @@ class WaymoMetric(KittiMetric):
submission.
Args:
net_outputs (List[dict]): List of dict storing the
inferenced bounding boxes and scores.
net_outputs (List[dict]): List of dict storing the inferred
bounding boxes and scores.
sample_idx_list (List[int]): List of input sample idx.
class_names (List[str]): A list of class names.
pklfile_prefix (str, optional): The prefix of pkl file.
......@@ -625,14 +622,13 @@ class WaymoMetric(KittiMetric):
Returns:
dict: Valid predicted boxes.
- bbox (np.ndarray): 2D bounding boxes.
- box3d_camera (np.ndarray): 3D bounding boxes in
camera coordinate.
- box3d_lidar (np.ndarray): 3D bounding boxes in
LiDAR coordinate.
- scores (np.ndarray): Scores of boxes.
- label_preds (np.ndarray): Class label predictions.
- sample_idx (int): Sample index.
- bbox (np.ndarray): 2D bounding boxes.
- box3d_camera (np.ndarray): 3D bounding boxes in camera
coordinate.
- box3d_lidar (np.ndarray): 3D bounding boxes in LiDAR coordinate.
- scores (np.ndarray): Scores of boxes.
- label_preds (np.ndarray): Class label predictions.
- sample_idx (int): Sample index.
"""
# TODO: refactor this function
box_preds = box_dict['bboxes_3d']
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment