"git@developer.sourcefind.cn:OpenDAS/torch-scatter.git" did not exist on "a1818616117f4a90b5c3c7e7ef538c37588fea68"
Commit faa0a6c4 authored by liyinhao

change names

parent 422d3154
@@ -46,27 +46,6 @@ def voc_ap(rec, prec, use_07_metric=False):
     return ap
-def boxes3d_to_bevboxes_lidar_torch(boxes3d):
-    """Boxes3d to Bevboxes Lidar.
-    Transform 3d boxes to bev boxes.
-    Args:
-        boxes3d (tensor): [x, y, z, w, l, h, ry] in LiDAR coords.
-    Returns:
-        boxes_bev (tensor): [x1, y1, x2, y2, ry].
-    """
-    boxes_bev = boxes3d.new(torch.Size((boxes3d.shape[0], 5)))
-    cu, cv = boxes3d[:, 0], boxes3d[:, 1]
-    half_l, half_w = boxes3d[:, 4] / 2, boxes3d[:, 3] / 2
-    boxes_bev[:, 0], boxes_bev[:, 1] = cu - half_w, cv - half_l
-    boxes_bev[:, 2], boxes_bev[:, 3] = cu + half_w, cv + half_l
-    boxes_bev[:, 4] = boxes3d[:, 6]
-    return boxes_bev
 def get_iou_gpu(bb1, bb2):
     """Get IoU.
@@ -201,7 +180,7 @@ def eval_det_multiprocessing(pred_all,
                              gt_all,
                              ovthresh=None,
                              use_07_metric=False):
-    """ Evaluate Detection Multiprocessing.
+    """Evaluate Detection Multiprocessing.
     Generic functions to compute precision/recall for object detection
     for multiple classes.
@@ -321,21 +300,22 @@ class APCalculator(object):
             for key in sorted(ap.keys()):
                 clsname = self.class2type_map[
                     key] if self.class2type_map else str(key)
-                ret_dict['%s Average Precision %d' %
-                         (clsname, iou_thresh * 100)] = ap[key]
-            ret_dict['mAP%d' % (iou_thresh * 100)] = np.mean(list(ap.values()))
+                ret_dict[f'{clsname}_AP_{int(iou_thresh * 100)}'] = ap[key]
+            ret_dict[f'mAP_{int(iou_thresh * 100)}'] = np.mean(
+                list(ap.values()))
             rec_list = []
             for key in sorted(ap.keys()):
                 clsname = self.class2type_map[
                     key] if self.class2type_map else str(key)
                 try:
-                    ret_dict['%s Recall %d' %
-                             (clsname, iou_thresh * 100)] = rec[key][-1]
+                    ret_dict[
+                        f'{clsname}_recall_{int(iou_thresh * 100)}'] = rec[
+                            key][-1]
                     rec_list.append(rec[key][-1])
                 except TypeError:
-                    ret_dict['%s Recall %d' % (clsname, iou_thresh * 100)] = 0
+                    ret_dict[f'{clsname}_recall_{int(iou_thresh * 100)}'] = 0
                     rec_list.append(0)
-            ret_dict['AR%d' % (iou_thresh * 100)] = np.mean(rec_list)
+            ret_dict[f'AR_{int(iou_thresh * 100)}'] = np.mean(rec_list)
             ret.append(ret_dict)
         return ret
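After this renaming, each per-threshold dictionary produced by compute_metrics() uses underscore-separated keys instead of the old space-separated 'Average Precision' / 'Recall' names. A sketch of the expected layout for iou_thresh = 0.25, with made-up class names and values:

    # Illustrative contents of one per-threshold metrics dict (values invented).
    ret_dict_25 = {
        'chair_AP_25': 0.82,      # was 'chair Average Precision 25'
        'table_AP_25': 0.67,
        'mAP_25': 0.745,          # was 'mAP25'
        'chair_recall_25': 0.91,  # was 'chair Recall 25'
        'table_recall_25': 0.88,
        'AR_25': 0.895,           # was 'AR25'
    }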
@@ -373,7 +353,7 @@ def indoor_eval(gt_annos, dt_annos, metric, class2type):
     Args:
         gt_annos (List): GT annotations.
         dt_annos (List): Detection annotations.
-        metric (dict): AP IoU thresholds.
+        metric (List[float]): AP IoU thresholds.
         class2type (dict): {class: type}.
     Return:
@@ -389,17 +369,15 @@ def indoor_eval(gt_annos, dt_annos, metric, class2type):
         if gt_anno['gt_boxes_upright_depth'].shape[-1] == 6:
             gt_anno['gt_boxes_upright_depth'] = np.pad(
                 bbox_lidar_bottom, ((0, 0), (0, 1)), 'constant')
-    ap_iou_thresholds = metric['AP_IOU_THRESHHOLDS']
-    ap_calculator = APCalculator(ap_iou_thresholds, class2type)
+    ap_calculator = APCalculator(metric, class2type)
     ap_calculator.step(dt_annos, gt_annos)
     result_str = str()
     result_str += 'mAP'
     metrics_dict = {}
     metrics = ap_calculator.compute_metrics()
-    for i, iou_threshold in enumerate(ap_iou_thresholds):
+    for i, iou_thresh in enumerate(metric):
         metrics_tmp = metrics[i]
         metrics_dict.update(metrics_tmp)
-        result_str += '(%.2f):%s ' % (iou_threshold,
-                                      metrics_dict['mAP%d' %
-                                                   (iou_threshold * 100)])
+        metric_result = metrics_dict[f'mAP_{int(iou_thresh * 100)}']
+        result_str += f'({iou_thresh:.2f}:{metric_result}'
     return result_str, metrics_dict
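A hedged usage sketch of the updated indoor_eval signature, assuming gt_annos, dt_annos and class2type have already been prepared by the caller, as in the dataset evaluate() methods further down; the IoU thresholds are now passed as a plain list:

    from mmdet3d.core.evaluation import indoor_eval

    # gt_annos / dt_annos: lists of annotation dicts prepared by the caller.
    result_str, metrics_dict = indoor_eval(
        gt_annos, dt_annos,
        [0.25, 0.5],  # was metric['AP_IOU_THRESHHOLDS']
        class2type)
    print(metrics_dict['mAP_25'], metrics_dict['mAP_50'])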
@@ -12,26 +12,7 @@ from .pipelines import Compose
 @DATASETS.register_module()
 class ScannetDataset(torch_data.Dataset):
-    type2class = {
-        'cabinet': 0,
-        'bed': 1,
-        'chair': 2,
-        'sofa': 3,
-        'table': 4,
-        'door': 5,
-        'window': 6,
-        'bookshelf': 7,
-        'picture': 8,
-        'counter': 9,
-        'desk': 10,
-        'curtain': 11,
-        'refrigerator': 12,
-        'showercurtrain': 13,
-        'toilet': 14,
-        'sink': 15,
-        'bathtub': 16,
-        'garbagebin': 17
-    }
     class2type = {
         0: 'cabinet',
         1: 'bed',
@@ -196,7 +177,7 @@ class ScannetDataset(torch_data.Dataset):
         return result
-    def _format_results(self, outputs):
+    def format_results(self, outputs):
         results = []
         for output in outputs:
             result = self._generate_annotations(output)
@@ -210,11 +191,11 @@ class ScannetDataset(torch_data.Dataset):
         Args:
             results (List): List of result.
-            metric (dict): AP_IOU_THRESHHOLDS.
+            metric (List[float]): AP IoU thresholds.
         """
-        results = self._format_results(results)
+        results = self.format_results(results)
         from mmdet3d.core.evaluation import indoor_eval
-        assert ('AP_IOU_THRESHHOLDS' in metric)
+        assert len(metric) > 0
         gt_annos = [
             copy.deepcopy(info['annos']) for info in self.scannet_infos
         ]
...
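For callers of ScannetDataset.evaluate, the change means the metric argument is now the list of AP IoU thresholds itself rather than a dict holding it. A minimal before/after sketch; the dataset and results objects are assumed to exist, as in the tests below:

    # Before this commit:
    #   metric = dict()
    #   metric['AP_IOU_THRESHHOLDS'] = [0.25, 0.5]
    # After:
    metric = [0.25, 0.5]
    ap_dict = scannet_dataset.evaluate(results, metric)
    print(ap_dict['mAP_25'], ap_dict['table_AP_25'])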
@@ -12,18 +12,7 @@ from .pipelines import Compose
 @DATASETS.register_module()
 class SunrgbdDataset(torch_data.Dataset):
-    type2class = {
-        'bed': 0,
-        'table': 1,
-        'sofa': 2,
-        'chair': 3,
-        'toilet': 4,
-        'desk': 5,
-        'dresser': 6,
-        'night_stand': 7,
-        'bookshelf': 8,
-        'bathtub': 9
-    }
     class2type = {
         0: 'bed',
         1: 'table',
@@ -139,19 +128,13 @@ class SunrgbdDataset(torch_data.Dataset):
         return np.random.choice(pool)
     def _generate_annotations(self, output):
-        '''
-        transfer input_dict & pred_dicts to anno format
-        which is needed by AP calculator
-        return annos: a tuple (batch_pred_map_cls, batch_gt_map_cls)
-            batch_pred_map_cls is a list: i=0,1..bs-1
-                pred_list_i: [(pred_sem_cls,
-                box_params, box_score)_j]
-                j=0,1..num_pred_obj -1
-            batch_gt_map_cls is a list: i=0,1..bs-1
-                gt_list_i: [(sem_cls_label, box_params)_j]
-                j=0,1..num_gt_obj -1
-        '''
+        """Generate Annotations.
+        Transform results of the model to the form of the evaluation.
+        Args:
+            output (List): The output of the model.
+        """
         result = []
         bs = len(output)
         for i in range(bs):
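The removed docstring described the annotation structure that _generate_annotations still returns; a hedged sketch of that per-sample layout follows, where the box_params_* names are placeholders for the box corner/parameter arrays:

    # One (pred, gt) pair per sample:
    #   pred: [(pred_sem_cls, box_params, box_score), ...]  one tuple per predicted box
    #   gt:   [(sem_cls_label, box_params), ...]            one tuple per GT box
    batch_pred_map_cls = [[(3, box_params_pred_0, 0.92),
                           (7, box_params_pred_1, 0.55)]]  # sample 0
    batch_gt_map_cls = [[(3, box_params_gt_0),
                         (5, box_params_gt_1)]]            # sample 0
    annos = (batch_pred_map_cls, batch_gt_map_cls)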
@@ -174,17 +157,25 @@ class SunrgbdDataset(torch_data.Dataset):
         return result
-    def _format_results(self, outputs):
+    def format_results(self, outputs):
         results = []
         for output in outputs:
             result = self._generate_annotations(output)
             results.append(result)
         return results
-    def evaluate(self, results, metric=None):
-        results = self._format_results(results)
+    def evaluate(self, results, metric):
+        """Evaluate.
+        Evaluation in indoor protocol.
+        Args:
+            results (List): List of result.
+            metric (List[float]): AP IoU thresholds.
+        """
+        results = self.format_results(results)
         from mmdet3d.core.evaluation import indoor_eval
-        assert ('AP_IOU_THRESHHOLDS' in metric)
+        assert len(metric) > 0
         gt_annos = [
             copy.deepcopy(info['annos']) for info in self.sunrgbd_infos
         ]
...
@@ -107,13 +107,12 @@ def test_evaluate():
     pred_boxes['label_preds'] = torch.Tensor([6, 6, 4, 9, 11]).cuda()
     pred_boxes['scores'] = torch.Tensor([0.5, 1.0, 1.0, 1.0, 1.0]).cuda()
     results.append([pred_boxes])
-    metric = dict()
-    metric['AP_IOU_THRESHHOLDS'] = [0.25, 0.5]
+    metric = [0.25, 0.5]
     ap_dict = scannet_dataset.evaluate(results, metric)
-    table_average_precision_25 = ap_dict['table Average Precision 25']
-    window_average_precision_25 = ap_dict['window Average Precision 25']
-    counter_average_precision_25 = ap_dict['counter Average Precision 25']
-    curtain_average_precision_25 = ap_dict['curtain Average Precision 25']
+    table_average_precision_25 = ap_dict['table_AP_25']
+    window_average_precision_25 = ap_dict['window_AP_25']
+    counter_average_precision_25 = ap_dict['counter_AP_25']
+    curtain_average_precision_25 = ap_dict['curtain_AP_25']
     assert abs(table_average_precision_25 - 0.3333) < 0.01
     assert abs(window_average_precision_25 - 1) < 0.01
     assert abs(counter_average_precision_25 - 1) < 0.01
...
@@ -80,12 +80,11 @@ def test_evaluate():
     pred_boxes['label_preds'] = torch.Tensor([0, 7, 6]).cuda()
     pred_boxes['scores'] = torch.Tensor([0.5, 1.0, 1.0]).cuda()
     results.append([pred_boxes])
-    metric = dict()
-    metric['AP_IOU_THRESHHOLDS'] = [0.25, 0.5]
+    metric = [0.25, 0.5]
     ap_dict = sunrgbd_dataset.evaluate(results, metric)
-    bed_precision_25 = ap_dict['bed Average Precision 25']
-    dresser_precision_25 = ap_dict['dresser Average Precision 25']
-    night_stand_precision_25 = ap_dict['night_stand Average Precision 25']
+    bed_precision_25 = ap_dict['bed_AP_25']
+    dresser_precision_25 = ap_dict['dresser_AP_25']
+    night_stand_precision_25 = ap_dict['night_stand_AP_25']
     assert abs(bed_precision_25 - 1) < 0.01
     assert abs(dresser_precision_25 - 1) < 0.01
     assert abs(night_stand_precision_25 - 1) < 0.01
@@ -161,7 +161,7 @@ def main():
         mmcv.dump(outputs, args.out)
     kwargs = {} if args.options is None else args.options
     if args.format_only:
-        dataset._format_results(outputs, **kwargs)
+        dataset.format_results(outputs, **kwargs)
     if args.eval:
         dataset.evaluate(outputs, args.eval, **kwargs)
...