Commit 464b6cdf authored by liyinhao, committed by zhangwenwei

change evaluate format.

parent 2bf79830
@@ -199,28 +199,35 @@ def eval_map_recall(det_infos, gt_infos, ovthresh=None):
     for multiple classes.

     Args:
-        det_infos (list[list[list[tuple]]]): Label, bbox and
-            score of the detection result.
-        gt_infos (list[list[list]]): Label, bbox of the groundtruth.
+        det_infos (list[dict]): Information of detection results, the dict
+            includes the following keys
+
+            - labels_3d (Tensor): Labels of boxes.
+            - boxes_3d (Tensor): 3d bboxes.
+            - scores_3d (Tensor): Scores of boxes.
+        gt_infos (list[dict]): Information of gt results, the dict
+            includes the following keys
+
+            - labels_3d (Tensor): Labels of boxes.
+            - boxes_3d (Tensor): 3d bboxes.
         ovthresh (list[float]): iou threshold.
             Default: None.

     Return:
-        dict: {classname: rec}.
-        dict: {classname: prec_all}.
-        dict: {classname: scalar}.
+        tuple[dict]: dict results of recall, AP, and precision for all classes.
     """
     pred_all = {}
     scan_cnt = 0
-    for batch_pred_map_cls in det_infos:
-        for i in range(len(batch_pred_map_cls)):
-            pred_all[scan_cnt] = batch_pred_map_cls[i]
-            scan_cnt += 1
+    for det_info in det_infos:
+        pred_all[scan_cnt] = det_info
+        scan_cnt += 1
     pred = {}  # map {classname: pred}
     gt = {}  # map {classname: gt}
     for img_id in pred_all.keys():
-        for label, bbox, score in pred_all[img_id]:
+        for i in range(len(pred_all[img_id]['labels_3d'])):
+            label = pred_all[img_id]['labels_3d'].numpy()[i]
+            bbox = pred_all[img_id]['boxes_3d'].numpy()[i]
+            score = pred_all[img_id]['scores_3d'].numpy()[i]
             if label not in pred:
                 pred[int(label)] = {}
             if img_id not in pred[label]:
@@ -232,7 +239,9 @@ def eval_map_recall(det_infos, gt_infos, ovthresh=None):
             pred[int(label)][img_id].append((bbox, score))
     for img_id in range(len(gt_infos)):
-        for label, bbox in gt_infos[img_id]:
+        for i in range(len(gt_infos[img_id]['labels_3d'])):
+            label = gt_infos[img_id]['labels_3d'][i]
+            bbox = gt_infos[img_id]['boxes_3d'][i]
             if label not in gt:
                 gt[label] = {}
             if img_id not in gt[label]:
@@ -267,8 +276,8 @@ def indoor_eval(gt_annos, dt_annos, metric, label2cat):
     Evaluate the result of the detection.

     Args:
-        gt_annos (list[list[dict]]): GT annotations.
-        dt_annos (list[list[List[tuple]]]): Detection annotations.
+        gt_annos (list[dict]): GT annotations.
+        dt_annos (list[dict]): Detection annotations.
         metric (list[float]): AP IoU thresholds.
         label2cat (dict): {label: cat}.
@@ -284,11 +293,8 @@ def indoor_eval(gt_annos, dt_annos, metric, label2cat):
         if bbox_lidar_bottom.shape[-1] == 6:
             bbox_lidar_bottom = np.pad(bbox_lidar_bottom, ((0, 0), (0, 1)),
                                        'constant')
-        gt_info_temp = []
-        for i in range(gt_anno['gt_num']):
-            gt_info_temp.append(
-                [gt_anno['class'][i], bbox_lidar_bottom[i]])
-        gt_infos.append(gt_info_temp)
+        gt_infos.append(
+            dict(boxes_3d=bbox_lidar_bottom, labels_3d=gt_anno['class']))

     result_str = str()
     result_str += 'mAP'
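The np.pad call kept above turns (N, 6) boxes into (N, 7) by appending a constant zero column (presumably the yaw term); a quick check of that behaviour:

    import numpy as np

    boxes = np.ones((2, 6), dtype=np.float32)
    padded = np.pad(boxes, ((0, 0), (0, 1)), 'constant')
    assert padded.shape == (2, 7)
    assert (padded[:, 6] == 0).all()  # the appended column is filled with zeros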
......
+import os.path as osp
+import tempfile
 import mmcv
 import numpy as np
 from torch.utils.data import Dataset
@@ -40,7 +43,10 @@ class Custom3DDataset(Dataset):
         sample_idx = info['point_cloud']['lidar_idx']
         pts_filename = self._get_pts_filename(sample_idx)
-        input_dict = dict(pts_filename=pts_filename)
+        input_dict = dict(
+            pts_filename=pts_filename,
+            sample_idx=sample_idx,
+            file_name=pts_filename)

         if not self.test_mode:
             annos = self.get_ann_info(index)
@@ -97,41 +103,16 @@ class Custom3DDataset(Dataset):
         return class_names

-    def _generate_annotations(self, output):
-        """Generate annotations.
-
-        Transform results of the model to the form of the evaluation.
-
-        Args:
-            output (list): The output of the model.
-        """
-        result = []
-        bs = len(output)
-        for i in range(bs):
-            pred_list_i = list()
-            pred_boxes = output[i]
-            box3d_depth = pred_boxes['box3d_lidar']
-            if box3d_depth is not None:
-                label_preds = pred_boxes['label_preds']
-                scores = pred_boxes['scores']
-                label_preds = label_preds.detach().cpu().numpy()
-                for j in range(box3d_depth.shape[0]):
-                    bbox_lidar = box3d_depth[j]  # [7] in lidar
-                    bbox_lidar_bottom = bbox_lidar.copy()
-                    pred_list_i.append(
-                        (label_preds[j], bbox_lidar_bottom, scores[j]))
-                result.append(pred_list_i)
-            else:
-                result.append(pred_list_i)
-        return result
-
-    def format_results(self, outputs):
-        results = []
-        for output in outputs:
-            result = self._generate_annotations(output)
-            results.append(result)
-        return results
+    def format_results(self,
+                       outputs,
+                       pklfile_prefix=None,
+                       submission_prefix=None):
+        if pklfile_prefix is None:
+            tmp_dir = tempfile.TemporaryDirectory()
+            pklfile_prefix = osp.join(tmp_dir.name, 'results')
+        out = f'{pklfile_prefix}.pkl'
+        mmcv.dump(outputs, out)
+        return outputs, tmp_dir

     def evaluate(self, results, metric=None):
         """Evaluate.
@@ -139,12 +120,17 @@ class Custom3DDataset(Dataset):
         Evaluation in indoor protocol.

         Args:
-            results (list): List of result.
+            results (list[dict]): List of results.
            metric (list[float]): AP IoU thresholds.
        """
-        results = self.format_results(results)
        from mmdet3d.core.evaluation import indoor_eval
-        assert len(metric) > 0
+        assert isinstance(
+            results, list), f'Expect results to be list, got {type(results)}.'
+        assert len(results) > 0, f'Expect length of results > 0.'
+        assert isinstance(
+            results[0], dict
+        ), f'Expect elements in results to be dict, got {type(results[0])}.'
+        assert len(metric) > 0, f'Expect length of metric > 0.'
        gt_annos = [info['annos'] for info in self.data_infos]
        label2cat = {i: cat_id for i, cat_id in enumerate(self.CLASSES)}
        ret_dict = indoor_eval(gt_annos, results, metric, label2cat)
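With these assertions, evaluate() expects one result dict per sample rather than nested per-batch lists; a hedged usage sketch (the dataset object and box values are placeholders):

    import torch

    # One dict per sample, in the same order as the dataset's data_infos.
    results = [
        dict(
            boxes_3d=torch.rand(3, 7),
            labels_3d=torch.tensor([0, 1, 2]),
            scores_3d=torch.tensor([0.9, 0.8, 0.7])),
    ]
    # ret_dict = dataset.evaluate(results, metric=[0.25, 0.5])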
......
@@ -26,6 +26,8 @@ class IndoorFlipData(object):
         points = results['points']
         gt_bboxes_3d = results['gt_bboxes_3d']
         aligned = True if gt_bboxes_3d.shape[1] == 6 else False
+        results['flip_yz'] = False
+        results['flip_xz'] = False
         if np.random.random() < self.flip_ratio_yz:
             # Flipping along the YZ plane
             points[:, 0] = -1 * points[:, 0]
@@ -203,6 +205,7 @@ class IndoorGlobalRotScale(object):
         else:
             gt_bboxes_3d[:, :3] = np.dot(gt_bboxes_3d[:, :3], rot_mat.T)
             gt_bboxes_3d[:, 6] -= rot_angle
+        results['rot_angle'] = rot_angle

         if self.scale_range is not None:
             assert len(self.scale_range) == 2, \
@@ -218,6 +221,8 @@ class IndoorGlobalRotScale(object):
             if self.shift_height:
                 points[:, -1] *= scale_ratio
+            results['scale_ratio'] = scale_ratio

         results['points'] = points
         results['gt_bboxes_3d'] = gt_bboxes_3d
         return results
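The augmentation transforms now record what they applied (flip_xz, flip_yz, rot_angle, scale_ratio) in results so that Collect3D can expose them as image metas; a simplified toy version of that bookkeeping pattern, not the actual classes:

    import numpy as np

    def flip_points_yz(results, flip_ratio=0.5):
        # Always write the flag, then overwrite it if the flip is applied,
        # so downstream consumers can rely on the key being present.
        results['flip_yz'] = False
        if np.random.random() < flip_ratio:
            results['points'][:, 0] = -results['points'][:, 0]
            results['flip_yz'] = True
        return results

    out = flip_points_yz({'points': np.random.rand(100, 4)})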
......
@@ -30,11 +30,12 @@ class ScanNetDataset(Custom3DDataset):
         # Use index to get the annos, thus the evalhook could also use this api
         info = self.data_infos[index]
         if info['annos']['gt_num'] != 0:
-            gt_bboxes_3d = info['annos']['gt_boxes_upright_depth']  # k, 6
-            gt_labels_3d = info['annos']['class']
+            gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
+                np.float32)  # k, 6
+            gt_labels_3d = info['annos']['class'].astype(np.long)
         else:
             gt_bboxes_3d = np.zeros((0, 6), dtype=np.float32)
-            gt_labels_3d = np.zeros(0, )
+            gt_labels_3d = np.zeros((0, ), dtype=np.long)
         sample_idx = info['point_cloud']['lidar_idx']
         pts_instance_mask_path = osp.join(self.data_root,
                                           f'{sample_idx}_ins_label.npy')
......
@@ -29,11 +29,12 @@ class SUNRGBDDataset(Custom3DDataset):
         # Use index to get the annos, thus the evalhook could also use this api
         info = self.data_infos[index]
         if info['annos']['gt_num'] != 0:
-            gt_bboxes_3d = info['annos']['gt_boxes_upright_depth']  # k, 6
-            gt_labels_3d = info['annos']['class']
+            gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
+                np.float32)  # k, 6
+            gt_labels_3d = info['annos']['class'].astype(np.long)
         else:
             gt_bboxes_3d = np.zeros((0, 7), dtype=np.float32)
-            gt_labels_3d = np.zeros(0, )
+            gt_labels_3d = np.zeros((0, ), dtype=np.long)
         anns_results = dict(
             gt_bboxes_3d=gt_bboxes_3d, gt_labels_3d=gt_labels_3d)
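Both datasets now cast annotations to fixed dtypes before returning them; a small illustration of the effect (np.long resolving to int64 is an assumption about a 64-bit Linux target):

    import numpy as np

    labels = np.array([3, 4, 17]).astype(np.int64)   # what .astype(np.long) yields on 64-bit Linux
    boxes = np.zeros((0, 7), dtype=np.float32)       # empty annotations keep a fixed dtype too
    assert labels.dtype == np.int64 and boxes.dtype == np.float32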
......
@@ -164,6 +164,7 @@ def test_config_data_pipeline():
                                                   True) else 'polygon'
     results = dict(
         filename='test_img.png',
+        ori_filename='test_img.png',
         img=img,
         img_shape=img.shape,
         ori_shape=img.shape,
@@ -171,6 +172,7 @@ def test_config_data_pipeline():
         gt_labels=np.array([1], dtype=np.int64),
         gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode),
     )
+    results['img_fields'] = ['img']
     results['bbox_fields'] = ['gt_bboxes']
     results['mask_fields'] = ['gt_masks']
     output_results = train_pipeline(results)
@@ -179,6 +181,7 @@ def test_config_data_pipeline():
     print('Test testing data pipeline: \n{!r}'.format(test_pipeline))
     results = dict(
         filename='test_img.png',
+        ori_filename='test_img.png',
         img=img,
         img_shape=img.shape,
         ori_shape=img.shape,
@@ -186,6 +189,7 @@ def test_config_data_pipeline():
         gt_labels=np.array([1], dtype=np.int64),
         gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode),
     )
+    results['img_fields'] = ['img']
     results['bbox_fields'] = ['gt_bboxes']
     results['mask_fields'] = ['gt_masks']
     output_results = test_pipeline(results)
@@ -196,6 +200,7 @@ def test_config_data_pipeline():
                                                     train_pipeline))
     results = dict(
         filename='test_img.png',
+        ori_filename='test_img.png',
         img=img,
         img_shape=img.shape,
         ori_shape=img.shape,
@@ -204,6 +209,7 @@ def test_config_data_pipeline():
         gt_masks=dummy_masks(
             img.shape[0], img.shape[1], num_obj=0, mode=mode),
     )
+    results['img_fields'] = ['img']
     results['bbox_fields'] = ['gt_bboxes']
     results['mask_fields'] = ['gt_masks']
     output_results = train_pipeline(results)
@@ -213,6 +219,7 @@ def test_config_data_pipeline():
                                                    test_pipeline))
     results = dict(
         filename='test_img.png',
+        ori_filename='test_img.png',
         img=img,
         img_shape=img.shape,
         ori_shape=img.shape,
@@ -221,6 +228,7 @@ def test_config_data_pipeline():
         gt_masks=dummy_masks(
             img.shape[0], img.shape[1], num_obj=0, mode=mode),
     )
+    results['img_fields'] = ['img']
     results['bbox_fields'] = ['gt_bboxes']
     results['mask_fields'] = ['gt_masks']
     output_results = test_pipeline(results)
......
 import numpy as np
+import torch

 from mmdet3d.core.evaluation.indoor_eval import average_precision, indoor_eval


 def test_indoor_eval():
-    det_infos = [[[
-        [4.0, [2.8734498, -0.187645, -0.02600911, 0.6761766, 0.56542563,
-               0.5953976, 0.], 0.9980684],
-        [4.0, [0.4031701, -3.2346897, 0.07118589, 0.73209894, 0.8711227,
-               0.5148243, 0.], 0.9747082],
-        [3.0, [-1.274147, -2.351935, 0.07428858, 1.4534658, 2.563081,
-               0.8587492, 0.], 0.9709939],
-        [17.0, [3.2214177, 0.7899204, 0.03836718, 0.05321002, 1.2607929,
-                0.1411697, 0.], 0.9482147],
-        [2.0, [-1.6804854, 2.399011, -0.13099639, 0.5608963, 0.5052759,
-               0.6770297, 0.], 0.84311247]
-    ]], [[
-        [17.0, [3.2112048e+00, 5.6918913e-01, -8.6143613e-04, 1.1942449e-01,
-                1.2988183e+00, 1.9952521e-01, 0.0000000e+00], 0.9965866],
-        [17.0, [3.248133, 0.4324184, 0.20038621, 0.17225507, 1.2736976,
-                0.32598814, 0.], 0.99507546],
-        [3.0, [-1.2793612, -2.3155289, 0.15598366, 1.2822601, 2.2253945,
-               0.8361754, 0.], 0.9916463],
-        [4.0, [2.8716104, -0.26416883, -0.04933786, 0.8190681, 0.60294986,
-               0.5769499, 0.], 0.9702634],
-        [17.0, [-2.2109854, 0.19445783, -0.01614259, 0.40659013, 0.35370222,
-                0.3290567, 0.], 0.95803124]
-    ]]]
+    det_infos = [{
+        'labels_3d':
+        torch.Tensor([4, 4, 3, 17, 2]),
+        'boxes_3d':
+        torch.Tensor([[
+            2.8734498, -0.187645, -0.02600911, 0.6761766, 0.56542563,
+            0.5953976, 0.
+        ], [
+            0.4031701, -3.2346897, 0.07118589, 0.73209894,
+            0.8711227, 0.5148243, 0.
+        ], [
+            -1.274147, -2.351935, 0.07428858, 1.4534658,
+            2.563081, 0.8587492, 0.
+        ], [
+            3.2214177, 0.7899204, 0.03836718, 0.05321002,
+            1.2607929, 0.1411697, 0.
+        ], [
+            -1.6804854, 2.399011, -0.13099639, 0.5608963,
+            0.5052759, 0.6770297, 0.
+        ]]),
+        'scores_3d':
+        torch.Tensor([0.9980684, 0.9747082, 0.9709939, 0.9482147, 0.84311247])
+    }, {
+        'labels_3d':
+        torch.Tensor([17.0, 17.0, 3.0, 4.0, 17.0]),
+        'boxes_3d':
+        torch.Tensor([[
+            3.2112048e+00, 5.6918913e-01, -8.6143613e-04, 1.1942449e-01,
+            1.2988183e+00, 1.9952521e-01, 0.0000000e+00
+        ], [
+            3.248133, 0.4324184, 0.20038621, 0.17225507,
+            1.2736976, 0.32598814, 0.
+        ], [
+            -1.2793612, -2.3155289, 0.15598366, 1.2822601,
+            2.2253945, 0.8361754, 0.
+        ], [
+            2.8716104, -0.26416883, -0.04933786, 0.8190681,
+            0.60294986, 0.5769499, 0.
+        ], [
+            -2.2109854, 0.19445783, -0.01614259, 0.40659013,
+            0.35370222, 0.3290567, 0.
+        ]]),
+        'scores_3d':
+        torch.Tensor([0.9965866, 0.99507546, 0.9916463, 0.9702634, 0.95803124])
+    }]

     label2cat = {
         0: 'cabinet',
@@ -148,7 +131,8 @@ def test_indoor_eval():
             0.15343043, 2.24693251, 0.22470728, 0.49632657,
             0.47379827, 0.43063563
         ]]),
-        'class': [3, 4, 4, 17, 2, 2, 2, 7, 11, 8, 17, 2]
+        'class':
+        np.array([3, 4, 4, 17, 2, 2, 2, 7, 11, 8, 17, 2])
     }, {
         'gt_num':
         12,
@@ -201,7 +185,8 @@ def test_indoor_eval():
             2.60432816, 1.62303996, 0.42025632, 1.23775268,
             0.51761389, 0.66034317
         ]]),
-        'class': [4, 11, 3, 7, 8, 2, 2, 17, 4, 2, 2, 17]
+        'class':
+        np.array([4, 11, 3, 7, 8, 2, 2, 17, 4, 2, 2, 17])
     }]
     ret_value = indoor_eval(gt_annos, det_infos, [0.25, 0.5], label2cat)
......
 import numpy as np
-import pytest
 import torch

 from mmdet3d.datasets import ScanNetDataset
@@ -38,6 +37,9 @@ def test_getitem():
             keys=[
                 'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask',
                 'pts_instance_mask'
+            ],
+            meta_keys=[
+                'file_name', 'flip_xz', 'flip_yz', 'sample_idx', 'rot_angle'
             ]),
     ]
@@ -48,7 +50,17 @@ def test_getitem():
     gt_labels = data['gt_labels_3d']._data
     pts_semantic_mask = data['pts_semantic_mask']._data
     pts_instance_mask = data['pts_instance_mask']._data
+    file_name = data['img_meta']._data['file_name']
+    flip_xz = data['img_meta']._data['flip_xz']
+    flip_yz = data['img_meta']._data['flip_yz']
+    rot_angle = data['img_meta']._data['rot_angle']
+    sample_idx = data['img_meta']._data['sample_idx']
+    assert file_name == './tests/data/scannet/' \
+                        'scannet_train_instance_data/scene0000_00_vert.npy'
+    assert flip_xz is True
+    assert flip_yz is True
+    assert abs(rot_angle - (-0.005471397477913809)) < 1e-5
+    assert sample_idx == 'scene0000_00'
     expected_points = np.array(
         [[-2.9078157, -1.9569951, 2.3543026, 2.389488],
          [-0.71360034, -3.4359822, 2.1330001, 2.1681855],
@@ -102,40 +114,35 @@ def test_getitem():

 def test_evaluate():
-    if not torch.cuda.is_available():
-        pytest.skip()
     root_path = './tests/data/scannet'
     ann_file = './tests/data/scannet/scannet_infos.pkl'
     scannet_dataset = ScanNetDataset(root_path, ann_file)
     results = []
     pred_boxes = dict()
-    pred_boxes['box3d_lidar'] = np.array([[
+    pred_boxes['boxes_3d'] = torch.Tensor([[
         3.52074146e+00, -1.48129511e+00, 1.57035351e+00, 2.31956959e-01,
         1.74445975e+00, 5.72351933e-01, 0
     ], [
         -3.48033905e+00, -2.90395617e+00, 1.19105673e+00, 1.70723915e-01,
         6.60776615e-01, 6.71535969e-01, 0
     ], [
         2.19867110e+00, -1.14655101e+00, 9.25755501e-03, 2.53463078e+00,
         5.41841269e-01, 1.21447623e+00, 0
     ], [
         2.50163722, -2.91681337, 0.82875049, 1.84280431, 0.61697435,
         0.28697443, 0
     ], [
         -0.01335114, 3.3114481, -0.00895238, 3.85815716, 0.44081616,
         2.16034412, 0
     ]])
-    pred_boxes['label_preds'] = torch.Tensor([6, 6, 4, 9, 11]).cuda()
-    pred_boxes['scores'] = torch.Tensor([0.5, 1.0, 1.0, 1.0, 1.0]).cuda()
-    results.append([pred_boxes])
+    pred_boxes['labels_3d'] = torch.Tensor([6, 6, 4, 9, 11])
+    pred_boxes['scores_3d'] = torch.Tensor([0.5, 1.0, 1.0, 1.0, 1.0])
+    results.append(pred_boxes)
     metric = [0.25, 0.5]
     ret_dict = scannet_dataset.evaluate(results, metric)
     table_average_precision_25 = ret_dict['table_AP_0.25']
......
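Judging by the keys asserted in these tests ('table_AP_0.25', 'bed_AP_0.25'), the dict returned by evaluate() appears to flatten per-class AP under '<category>_AP_<threshold>' keys; a hedged helper for reading it, with the key format treated as an assumption:

    def collect_ap(ret_dict, classes=('table', 'bed'), thresholds=(0.25, 0.5)):
        # Key format inferred from the tests above, not from documented API.
        return {(cat, thr): ret_dict.get(f'{cat}_AP_{thr}')
                for cat in classes for thr in thresholds}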
 import numpy as np
-import pytest
 import torch

 from mmdet3d.datasets import SUNRGBDDataset
@@ -27,7 +26,12 @@ def test_getitem():
         dict(type='IndoorPointSample', num_points=5),
         dict(type='DefaultFormatBundle3D', class_names=class_names),
         dict(
-            type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']),
+            type='Collect3D',
+            keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'],
+            meta_keys=[
+                'file_name', 'flip_xz', 'flip_yz', 'sample_idx', 'scale_ratio',
+                'rot_angle'
+            ]),
     ]
     sunrgbd_dataset = SUNRGBDDataset(root_path, ann_file, pipelines)
@@ -35,7 +39,19 @@ def test_getitem():
     points = data['points']._data
     gt_bboxes_3d = data['gt_bboxes_3d']._data
     gt_labels_3d = data['gt_labels_3d']._data
+    file_name = data['img_meta']._data['file_name']
+    flip_xz = data['img_meta']._data['flip_xz']
+    flip_yz = data['img_meta']._data['flip_yz']
+    scale_ratio = data['img_meta']._data['scale_ratio']
+    rot_angle = data['img_meta']._data['rot_angle']
+    sample_idx = data['img_meta']._data['sample_idx']
+    assert file_name == './tests/data/sunrgbd/sunrgbd_trainval' \
+                        '/lidar/000001.npy'
+    assert flip_xz is False
+    assert flip_yz is True
+    assert abs(scale_ratio - 1.0308290128214932) < 1e-5
+    assert abs(rot_angle - 0.22534577750874518) < 1e-5
+    assert sample_idx == 1
     expected_points = np.array(
         [[0.6570105, 1.5538014, 0.24514851, 1.0165423],
          [0.656101, 1.558591, 0.21755838, 0.98895216],
@@ -86,15 +102,12 @@ def test_getitem():

 def test_evaluate():
-    if not torch.cuda.is_available():
-        pytest.skip()
     root_path = './tests/data/sunrgbd'
     ann_file = './tests/data/sunrgbd/sunrgbd_infos.pkl'
     sunrgbd_dataset = SUNRGBDDataset(root_path, ann_file)
     results = []
     pred_boxes = dict()
-    pred_boxes['box3d_lidar'] = np.array(
+    pred_boxes['boxes_3d'] = torch.Tensor(
         [[
             4.168696, -1.047307, -1.231666, 1.887584, 2.30207, 1.969614,
             1.69564944
@@ -104,9 +117,9 @@ def test_evaluate():
             1.64999513
         ], [1.904545, 1.086364, -1.2, 1.563134, 0.71281, 2.104546,
             0.1022069]])
-    pred_boxes['label_preds'] = torch.Tensor([0, 7, 6]).cuda()
-    pred_boxes['scores'] = torch.Tensor([0.5, 1.0, 1.0]).cuda()
-    results.append([pred_boxes])
+    pred_boxes['labels_3d'] = torch.Tensor([0, 7, 6])
+    pred_boxes['scores_3d'] = torch.Tensor([0.5, 1.0, 1.0])
+    results.append(pred_boxes)
     metric = [0.25, 0.5]
     ap_dict = sunrgbd_dataset.evaluate(results, metric)
     bed_precision_25 = ap_dict['bed_AP_0.25']
......