Unverified Commit 76849785 authored by xiliu8006's avatar xiliu8006 Committed by GitHub
Browse files

[Enhance] Support nuscenes demo (#353)



* support nuscenes dataset in demo

* add convert_SyncBN in __init__

* fix meshlab visualization bug

* modify meshlab unittest

* add docstring

* add empty line in docstring
Co-authored-by: xiliu8006 <xiliu800@gmail.com>
parent 4eed122d
from .inference import inference_detector, init_detector, show_result_meshlab from .inference import (convert_SyncBN, inference_detector, init_detector,
show_result_meshlab)
from .test import single_gpu_test from .test import single_gpu_test
__all__ = [ __all__ = [
'inference_detector', 'init_detector', 'single_gpu_test', 'inference_detector', 'init_detector', 'single_gpu_test',
'show_result_meshlab' 'show_result_meshlab', 'convert_SyncBN'
] ]
...@@ -11,6 +11,22 @@ from mmdet3d.datasets.pipelines import Compose ...@@ -11,6 +11,22 @@ from mmdet3d.datasets.pipelines import Compose
from mmdet3d.models import build_detector from mmdet3d.models import build_detector
def convert_SyncBN(config):
    """Convert all naiveSyncBN norm layers in a config to plain BN, in place.

    Demo inference runs on a single device, where the distributed
    ``naiveSyncBN*`` variants cannot be built, so every ``norm_cfg`` whose
    ``type`` starts with ``naiveSyncBN`` is rewritten to the corresponding
    ``BN`` type (e.g. ``naiveSyncBN2d`` -> ``BN2d``).

    Args:
        config (dict): Model config (or any sub-config) to convert. The
            dict is traversed recursively; nested lists of configs are
            traversed as well. Non-dict leaves are left untouched.
    """
    if isinstance(config, dict):
        for key, value in config.items():
            if key == 'norm_cfg':
                value['type'] = value['type'].replace('naiveSyncBN', 'BN')
            else:
                convert_SyncBN(value)
    elif isinstance(config, (list, tuple)):
        # mm-style configs often nest sub-configs inside lists
        # (e.g. a list of head configs); recurse into each element.
        for item in config:
            convert_SyncBN(item)
def init_detector(config, checkpoint=None, device='cuda:0'): def init_detector(config, checkpoint=None, device='cuda:0'):
"""Initialize a detector from config file. """Initialize a detector from config file.
...@@ -30,6 +46,7 @@ def init_detector(config, checkpoint=None, device='cuda:0'): ...@@ -30,6 +46,7 @@ def init_detector(config, checkpoint=None, device='cuda:0'):
raise TypeError('config must be a filename or Config object, ' raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}') f'but got {type(config)}')
config.model.pretrained = None config.model.pretrained = None
convert_SyncBN(config.model)
config.model.train_cfg = None config.model.train_cfg = None
model = build_detector(config.model, test_cfg=config.get('test_cfg')) model = build_detector(config.model, test_cfg=config.get('test_cfg'))
if checkpoint is not None: if checkpoint is not None:
...@@ -64,6 +81,9 @@ def inference_detector(model, pcd): ...@@ -64,6 +81,9 @@ def inference_detector(model, pcd):
pts_filename=pcd, pts_filename=pcd,
box_type_3d=box_type_3d, box_type_3d=box_type_3d,
box_mode_3d=box_mode_3d, box_mode_3d=box_mode_3d,
sweeps=[],
# set timestamp = 0
timestamp=[0],
img_fields=[], img_fields=[],
bbox3d_fields=[], bbox3d_fields=[],
pts_mask_fields=[], pts_mask_fields=[],
...@@ -100,7 +120,10 @@ def show_result_meshlab(data, result, out_dir): ...@@ -100,7 +120,10 @@ def show_result_meshlab(data, result, out_dir):
assert out_dir is not None, 'Expect out_dir, got none.' assert out_dir is not None, 'Expect out_dir, got none.'
pred_bboxes = result[0]['boxes_3d'].tensor.numpy() if 'pts_bbox' in result[0].keys():
pred_bboxes = result[0]['pts_bbox']['boxes_3d'].tensor.numpy()
else:
pred_bboxes = result[0]['boxes_3d'].tensor.numpy()
# for now we convert points into depth mode # for now we convert points into depth mode
if data['img_metas'][0][0]['box_mode_3d'] != Box3DMode.DEPTH: if data['img_metas'][0][0]['box_mode_3d'] != Box3DMode.DEPTH:
points = points[..., [1, 0, 2]] points = points[..., [1, 0, 2]]
...@@ -108,7 +131,5 @@ def show_result_meshlab(data, result, out_dir): ...@@ -108,7 +131,5 @@ def show_result_meshlab(data, result, out_dir):
pred_bboxes = Box3DMode.convert(pred_bboxes, pred_bboxes = Box3DMode.convert(pred_bboxes,
data['img_metas'][0][0]['box_mode_3d'], data['img_metas'][0][0]['box_mode_3d'],
Box3DMode.DEPTH) Box3DMode.DEPTH)
pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2 show_result(points, None, pred_bboxes, out_dir, file_name, show=False)
else: return out_dir, file_name
pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
show_result(points, None, pred_bboxes, out_dir, file_name)
...@@ -79,9 +79,9 @@ def show_result(points, gt_bboxes, pred_bboxes, out_dir, filename, show=True): ...@@ -79,9 +79,9 @@ def show_result(points, gt_bboxes, pred_bboxes, out_dir, filename, show=True):
filename (str): Filename of the current frame. filename (str): Filename of the current frame.
show (bool): Visualize the results online. show (bool): Visualize the results online.
""" """
from .open3d_vis import Visualizer
if show: if show:
from .open3d_vis import Visualizer
vis = Visualizer(points) vis = Visualizer(points)
if pred_bboxes is not None: if pred_bboxes is not None:
vis.add_bboxes(bbox3d=pred_bboxes) vis.add_bboxes(bbox3d=pred_bboxes)
......
import numpy as np
import os
import pytest import pytest
import tempfile
import torch import torch
from mmcv.parallel import MMDataParallel from mmcv.parallel import MMDataParallel
from os.path import dirname, exists, join from os.path import dirname, exists, join
from mmdet3d.apis import inference_detector, init_detector, single_gpu_test from mmdet3d.apis import (convert_SyncBN, inference_detector, init_detector,
show_result_meshlab, single_gpu_test)
from mmdet3d.core import Box3DMode
from mmdet3d.core.bbox import LiDARInstance3DBoxes
from mmdet3d.datasets import build_dataloader, build_dataset from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_detector from mmdet3d.models import build_detector
...@@ -32,6 +38,48 @@ def _get_config_module(fname): ...@@ -32,6 +38,48 @@ def _get_config_module(fname):
return config_mod return config_mod
def test_convert_SyncBN():
    """Check that convert_SyncBN rewrites every norm_cfg of a nuScenes
    PointPillars config from naiveSyncBN variants to plain BN."""
    config = _get_config_module(
        'pointpillars/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d.py')
    model = config.model
    convert_SyncBN(model)
    expected = (('pts_voxel_encoder', 'BN1d'), ('pts_backbone', 'BN2d'),
                ('pts_neck', 'BN2d'))
    for component, norm_type in expected:
        assert model[component]['norm_cfg']['type'] == norm_type
def test_show_result_meshlab():
    """Run show_result_meshlab on a synthetic nuScenes-style result and
    check that both the predicted-boxes .ply and the points .obj files
    are written under the output directory, then clean them up."""
    pcd = 'tests/data/nuscenes/samples/LIDAR_TOP/n015-2018-08-02-17-16-37+' \
        '0800__LIDAR_TOP__1533201470948018.pcd.bin'
    # one LiDAR-frame box: x, y, z, w, l, h, yaw
    box_3d = LiDARInstance3DBoxes(
        torch.tensor(
            [[8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100]]))
    labels_3d = torch.tensor([0])
    scores_3d = torch.tensor([0.5])
    points = np.random.rand(100, 4)
    img_meta = dict(
        pts_filename=pcd, boxes_3d=box_3d, box_mode_3d=Box3DMode.LIDAR)
    data = dict(points=[[torch.tensor(points)]], img_metas=[[img_meta]])
    # nuScenes-style result: boxes nested under the 'pts_bbox' key
    result = [
        dict(
            pts_bbox=dict(
                boxes_3d=box_3d, labels_3d=labels_3d, scores_3d=scores_3d))
    ]
    temp_out_dir = tempfile.mkdtemp()
    out_dir, file_name = show_result_meshlab(data, result, temp_out_dir)
    frame_dir = os.path.join(out_dir, file_name)
    ply_path = os.path.join(frame_dir, file_name + '_pred.ply')
    obj_path = os.path.join(frame_dir, file_name + '_points.obj')
    for expected_file in (ply_path, obj_path):
        assert os.path.exists(expected_file)
    os.remove(obj_path)
    os.remove(ply_path)
    os.removedirs(os.path.join(temp_out_dir, file_name))
def test_inference_detector(): def test_inference_detector():
pcd = 'tests/data/kitti/training/velodyne_reduced/000000.bin' pcd = 'tests/data/kitti/training/velodyne_reduced/000000.bin'
detector_cfg = 'configs/pointpillars/hv_pointpillars_secfpn_' \ detector_cfg = 'configs/pointpillars/hv_pointpillars_secfpn_' \
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment