Unverified Commit 96cab60d authored by Yuefeng Wu, committed by GitHub

[Fix]: fix multi-batch show in detectors (#120)

parent 1cd50481
@@ -48,8 +48,8 @@ class Base3DDetector(BaseDetector):
         Note this setting will change the expected inputs. When
         `return_loss=True`, img and img_metas are single-nested (i.e.
-        torch.Tensor and list[dict]), and when `return_loss=False`, img
-        and img_metas should be double nested (i.e. list[torch.Tensor],
+        torch.Tensor and list[dict]), and when `return_loss=False`, img and
+        img_metas should be double nested (i.e. list[torch.Tensor],
         list[list[dict]]), with the outer list indicating test time
         augmentations.
         """
@@ -62,42 +62,48 @@ class Base3DDetector(BaseDetector):
         """Results visualization.
 
         Args:
-            data (dict): Input points and the information of the sample.
-            result (dict): Prediction results.
+            data (list[dict]): Input points and the information of the sample.
+            result (list[dict]): Prediction results.
             out_dir (str): Output directory of visualization result.
         """
-        if isinstance(data['points'][0], DC):
-            points = data['points'][0]._data[0][0].numpy()
-        elif mmcv.is_list_of(data['points'][0], torch.Tensor):
-            points = data['points'][0][0]
-        else:
-            ValueError(f"Unsupported data type {type(data['points'][0])} "
-                       f'for visualization!')
-        if isinstance(data['img_metas'][0], DC):
-            pts_filename = data['img_metas'][0]._data[0][0]['pts_filename']
-            box_mode_3d = data['img_metas'][0]._data[0][0]['box_mode_3d']
-        elif mmcv.is_list_of(data['img_metas'][0], dict):
-            pts_filename = data['img_metas'][0][0]['pts_filename']
-            box_mode_3d = data['img_metas'][0][0]['box_mode_3d']
-        else:
-            ValueError(f"Unsupported data type {type(data['img_metas'][0])} "
-                       f'for visualization!')
-        file_name = osp.split(pts_filename)[-1].split('.')[0]
+        for batch_id in range(len(result)):
+            if isinstance(data['points'][0], DC):
+                points = data['points'][0]._data[0][batch_id].numpy()
+            elif mmcv.is_list_of(data['points'][0], torch.Tensor):
+                points = data['points'][0][batch_id]
+            else:
+                ValueError(f"Unsupported data type {type(data['points'][0])} "
+                           f'for visualization!')
+            if isinstance(data['img_metas'][0], DC):
+                pts_filename = data['img_metas'][0]._data[0][batch_id][
+                    'pts_filename']
+                box_mode_3d = data['img_metas'][0]._data[0][batch_id][
+                    'box_mode_3d']
+            elif mmcv.is_list_of(data['img_metas'][0], dict):
+                pts_filename = data['img_metas'][0][batch_id]['pts_filename']
+                box_mode_3d = data['img_metas'][0][batch_id]['box_mode_3d']
+            else:
+                ValueError(
+                    f"Unsupported data type {type(data['img_metas'][0])} "
+                    f'for visualization!')
+            file_name = osp.split(pts_filename)[-1].split('.')[0]
 
-        assert out_dir is not None, 'Expect out_dir, got none.'
-        pred_bboxes = copy.deepcopy(result['boxes_3d'].tensor.numpy())
-        # for now we convert points into depth mode
-        if box_mode_3d == Box3DMode.DEPTH:
-            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
-        elif box_mode_3d == Box3DMode.CAM or box_mode_3d == Box3DMode.LIDAR:
-            points = points[..., [1, 0, 2]]
-            points[..., 0] *= -1
-            pred_bboxes = Box3DMode.convert(pred_bboxes, box_mode_3d,
-                                            Box3DMode.DEPTH)
-            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
-        else:
-            ValueError(
-                f'Unsupported box_mode_3d {box_mode_3d} for conversion!')
-        show_result(points, None, pred_bboxes, out_dir, file_name)
+            assert out_dir is not None, 'Expect out_dir, got none.'
+            pred_bboxes = copy.deepcopy(
+                result[batch_id]['boxes_3d'].tensor.numpy())
+            # for now we convert points into depth mode
+            if box_mode_3d == Box3DMode.DEPTH:
+                pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
+            elif (box_mode_3d == Box3DMode.CAM) or (box_mode_3d
+                                                    == Box3DMode.LIDAR):
+                points = points[..., [1, 0, 2]]
+                points[..., 0] *= -1
+                pred_bboxes = Box3DMode.convert(pred_bboxes, box_mode_3d,
+                                                Box3DMode.DEPTH)
+                pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
+            else:
+                ValueError(
+                    f'Unsupported box_mode_3d {box_mode_3d} for conversion!')
+            show_result(points, None, pred_bboxes, out_dir, file_name)
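
The substance of the fix: the old body always indexed sample 0, so only the first sample of a multi-sample batch was ever visualized, while the new loop indexes each sample by `batch_id`. Below is a minimal, self-contained sketch of that indexing pattern, assuming mmcv is installed; the point clouds and result dicts are fabricated, and the public `DataContainer.data` is used in place of the private `_data`:

```python
import torch
from mmcv.parallel import DataContainer as DC

# Fabricated batch of two point clouds, mimicking the collated layout that
# show_results receives: data['points'] is a list holding one DataContainer,
# whose data is [per-GPU list of per-sample tensors].
points_batch = [torch.rand(100, 4), torch.rand(120, 4)]
data = dict(points=[DC([points_batch])])

# Fabricated per-sample results; the fixed code loops over these.
result = [dict(boxes_3d=None), dict(boxes_3d=None)]

for batch_id in range(len(result)):
    # Same indexing as the patch: GPU 0, then sample batch_id.
    points = data['points'][0].data[0][batch_id]
    print(batch_id, points.shape)  # every sample is visited, not just sample 0
```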
+import copy
 import mmcv
 import torch
 from mmcv.parallel import DataContainer as DC
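
The `import copy` added here pairs with the new `copy.deepcopy(...)` call in the `MVXTwoStageDetector` hunk below: the box array is later shifted in place for depth-mode display, and copying first presumably keeps that shift from writing back into the caller's `result`. A toy illustration of the aliasing issue, using a stand-in NumPy array:

```python
import copy

import numpy as np

# Stand-in for result[batch_id]['pts_bbox']['boxes_3d'][inds].tensor.numpy().
boxes = np.zeros((2, 7))

aliased = boxes                     # no copy: both names share one array
aliased[..., 2] += aliased[..., 5] / 2
assert np.shares_memory(aliased, boxes)        # caller's boxes get touched

boxes = np.zeros((2, 7))
shifted = copy.deepcopy(boxes)      # what the patch does before the z shift
shifted[..., 2] += shifted[..., 5] / 2
assert not np.shares_memory(shifted, boxes)    # original left untouched
```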
@@ -454,37 +455,43 @@ class MVXTwoStageDetector(Base3DDetector):
             result (dict): Prediction results.
             out_dir (str): Output directory of visualization result.
         """
-        if isinstance(data['points'][0], DC):
-            points = data['points'][0]._data[0][0].numpy()
-        elif mmcv.is_list_of(data['points'][0], torch.Tensor):
-            points = data['points'][0][0]
-        else:
-            ValueError(f"Unsupported data type {type(data['points'][0])} "
-                       f'for visualization!')
-        if isinstance(data['img_metas'][0], DC):
-            pts_filename = data['img_metas'][0]._data[0][0]['pts_filename']
-            box_mode_3d = data['img_metas'][0]._data[0][0]['box_mode_3d']
-        elif mmcv.is_list_of(data['img_metas'][0], dict):
-            pts_filename = data['img_metas'][0][0]['pts_filename']
-            box_mode_3d = data['img_metas'][0][0]['box_mode_3d']
-        else:
-            ValueError(f"Unsupported data type {type(data['img_metas'][0])} "
-                       f'for visualization!')
-        file_name = osp.split(pts_filename)[-1].split('.')[0]
+        for batch_id in range(len(result)):
+            if isinstance(data['points'][0], DC):
+                points = data['points'][0]._data[0][batch_id].numpy()
+            elif mmcv.is_list_of(data['points'][0], torch.Tensor):
+                points = data['points'][0][batch_id]
+            else:
+                ValueError(f"Unsupported data type {type(data['points'][0])} "
+                           f'for visualization!')
+            if isinstance(data['img_metas'][0], DC):
+                pts_filename = data['img_metas'][0]._data[0][batch_id][
+                    'pts_filename']
+                box_mode_3d = data['img_metas'][0]._data[0][batch_id][
+                    'box_mode_3d']
+            elif mmcv.is_list_of(data['img_metas'][0], dict):
+                pts_filename = data['img_metas'][0][batch_id]['pts_filename']
+                box_mode_3d = data['img_metas'][0][batch_id]['box_mode_3d']
+            else:
+                ValueError(
+                    f"Unsupported data type {type(data['img_metas'][0])} "
+                    f'for visualization!')
+            file_name = osp.split(pts_filename)[-1].split('.')[0]
 
-        assert out_dir is not None, 'Expect out_dir, got none.'
-        inds = result['pts_bbox']['scores_3d'] > 0.1
-        pred_bboxes = result['pts_bbox']['boxes_3d'][inds].tensor.numpy()
-        # for now we convert points into depth mode
-        if box_mode_3d == Box3DMode.DEPTH:
-            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
-        elif box_mode_3d == Box3DMode.CAM or box_mode_3d == Box3DMode.LIDAR:
-            points = points[..., [1, 0, 2]]
-            points[..., 0] *= -1
-            pred_bboxes = Box3DMode.convert(pred_bboxes, box_mode_3d,
-                                            Box3DMode.DEPTH)
-            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
-        else:
-            ValueError(
-                f'Unsupported box_mode_3d {box_mode_3d} for conversion!')
-        show_result(points, None, pred_bboxes, out_dir, file_name)
+            assert out_dir is not None, 'Expect out_dir, got none.'
+            inds = result[batch_id]['pts_bbox']['scores_3d'] > 0.1
+            pred_bboxes = copy.deepcopy(
+                result[batch_id]['pts_bbox']['boxes_3d'][inds].tensor.numpy())
+            # for now we convert points into depth mode
+            if box_mode_3d == Box3DMode.DEPTH:
+                pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
+            elif (box_mode_3d == Box3DMode.CAM) or (box_mode_3d
+                                                    == Box3DMode.LIDAR):
+                points = points[..., [1, 0, 2]]
+                points[..., 0] *= -1
+                pred_bboxes = Box3DMode.convert(pred_bboxes, box_mode_3d,
+                                                Box3DMode.DEPTH)
+                pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
+            else:
+                ValueError(
+                    f'Unsupported box_mode_3d {box_mode_3d} for conversion!')
+            show_result(points, None, pred_bboxes, out_dir, file_name)
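
For reference, the `scores_3d > 0.1` line keeps only reasonably confident detections before visualization; it is plain boolean-mask indexing, sketched here with fabricated tensors standing in for the real `pts_bbox` outputs:

```python
import torch

# Fabricated per-sample outputs in place of result[batch_id]['pts_bbox'].
scores_3d = torch.tensor([0.05, 0.40, 0.90, 0.08])
boxes_3d = torch.rand(4, 7)          # one 7-dim box per row (made-up values)

inds = scores_3d > 0.1               # boolean mask, as in the MVX branch
kept = boxes_3d[inds]                # only confident boxes go to show_result
print(inds.tolist(), kept.shape)     # [False, True, True, False] torch.Size([2, 7])
```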