"orbit/utils.py" did not exist on "f2882f6e4a0661f0b050dfc3103a378b09cdc7a0"
Commit d75836ea authored by Tai-Wang

Merge branch 'master' into v1.0.0.dev0

parents 022ee2fb 13f002d7
......@@ -215,13 +215,13 @@ train_pipeline = [ # Training pipeline, for more details refer to mmdet3d.datasets.p
rot_range=[-0.027777777777777776, 0.027777777777777776], # Range of the rotation angle
scale_range=None), # Range of the scaling size
dict(
type='DefaultFormatBundle3D', # Default format bundle to gather all the loaded data, for more details refer to mmdet3d.datasets.pipelines.formating
type='DefaultFormatBundle3D', # Default format bundle to gather all the loaded data, for more details refer to mmdet3d.datasets.pipelines.formatting
class_names=('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'garbagebin')),
dict(
type='Collect3D', # Final pipeline step that decides which keys of the data are fed to the detector, for more details refer to mmdet3d.datasets.pipelines.formating
type='Collect3D', # Final pipeline step that decides which keys of the data are fed to the detector, for more details refer to mmdet3d.datasets.pipelines.formatting
keys=[
'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask',
'pts_instance_mask'
......@@ -236,12 +236,12 @@ test_pipeline = [ # Testing pipeline, for more details refer to mmdet3d.datasets.pi
dict(type='PointSample', # Indoor point sampling, for more details refer to mmdet3d.datasets.pipelines.indoor_sample
num_points=40000), # Number of points to sample
dict(
type='DefaultFormatBundle3D', # Default format bundle to gather all the loaded data, for more details refer to mmdet3d.datasets.pipelines.formating
type='DefaultFormatBundle3D', # Default format bundle to gather all the loaded data, for more details refer to mmdet3d.datasets.pipelines.formatting
class_names=('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'garbagebin')),
dict(type='Collect3D', # Final pipeline step that decides which keys of the data are fed to the detector, for more details refer to mmdet3d.datasets.pipelines.formating
dict(type='Collect3D', # Final pipeline step that decides which keys of the data are fed to the detector, for more details refer to mmdet3d.datasets.pipelines.formatting
keys=['points'])
]
eval_pipeline = [ # Pipeline used for model evaluation or visualization, for more details refer to mmdet3d.datasets.pipelines
......@@ -251,13 +251,13 @@ eval_pipeline = [ # Pipeline used for model evaluation or visualization, for more details
load_dim=6, # Dimensions of the loaded points
use_dim=[0, 1, 2]), # Which dimensions of the loaded points to use
dict(
type='DefaultFormatBundle3D', # Default format bundle to gather all the loaded data, for more details refer to mmdet3d.datasets.pipelines.formating
type='DefaultFormatBundle3D', # Default format bundle to gather all the loaded data, for more details refer to mmdet3d.datasets.pipelines.formatting
class_names=('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
'window', 'bookshelf', 'picture', 'counter', 'desk',
'curtain', 'refrigerator', 'showercurtrain', 'toilet',
'sink', 'bathtub', 'garbagebin')),
with_label=False),
dict(type='Collect3D', # Final pipeline step that decides which keys of the data are fed to the detector, for more details refer to mmdet3d.datasets.pipelines.formating
dict(type='Collect3D', # Final pipeline step that decides which keys of the data are fed to the detector, for more details refer to mmdet3d.datasets.pipelines.formatting
keys=['points'])
]
data = dict(
......
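For reference, these pipeline lists are plain config lists that get composed into one callable. Below is a minimal sketch of that idea using mmdet3d's Compose; the abbreviated class list and the point-cloud path are illustrative placeholders, not values from this diff.

from mmdet3d.datasets.pipelines import Compose

eval_pipeline_cfg = [
    dict(type='LoadPointsFromFile',  # read the raw point cloud from disk
         coord_type='DEPTH', load_dim=6, use_dim=[0, 1, 2]),
    dict(type='DefaultFormatBundle3D',  # pack the loaded data into DataContainers
         class_names=('cabinet', 'bed', 'chair'),  # abbreviated, illustrative
         with_label=False),
    dict(type='Collect3D', keys=['points'])  # keep only the keys the model needs
]
pipeline = Compose(eval_pipeline_cfg)
# Feeding a results dict with a point-cloud path runs every step in order:
# out = pipeline(dict(pts_filename='data/scannet/points/scene0000_00.bin'))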
......@@ -19,7 +19,7 @@ def digit_version(version_str):
mmcv_minimum_version = '1.3.8'
mmcv_maximum_version = '1.4.0'
mmcv_maximum_version = '1.5.0'
mmcv_version = digit_version(mmcv.__version__)
......
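For context, the widened bound feeds the usual import-time guard. This is a minimal sketch, assuming digit_version simply turns a version string into a comparable tuple; the real helper in mmdet3d handles a few more cases.

import mmcv


def digit_version(version_str):
    # Simplified: '1.4.0' -> (1, 4, 0), so versions compare as tuples.
    return tuple(int(x) for x in version_str.split('.') if x.isdigit())


mmcv_minimum_version = '1.3.8'
mmcv_maximum_version = '1.5.0'
mmcv_version = digit_version(mmcv.__version__)

# Fail fast if the installed mmcv falls outside the supported window.
assert (digit_version(mmcv_minimum_version) <= mmcv_version
        <= digit_version(mmcv_maximum_version)), (
    f'MMCV=={mmcv.__version__} is incompatible; install '
    f'mmcv-full>={mmcv_minimum_version},<={mmcv_maximum_version}.')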
......@@ -65,6 +65,7 @@ def init_model(config, checkpoint=None, device='cuda:0'):
if 'PALETTE' in checkpoint['meta']: # 3D Segmentor
model.PALETTE = checkpoint['meta']['PALETTE']
model.cfg = config # save the config in the model for convenience
torch.cuda.set_device(device)
model.to(device)
model.eval()
return model
......
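With torch.cuda.set_device added, initializing a model on a non-default GPU now also switches the default CUDA device. A minimal usage sketch; the config, checkpoint, and point-cloud paths are illustrative placeholders.

from mmdet3d.apis import inference_detector, init_model

config_file = 'configs/votenet/votenet_8x8_scannet-3d-18class.py'   # placeholder
checkpoint_file = 'checkpoints/votenet_8x8_scannet-3d-18class.pth'  # placeholder

# Build the detector on GPU 1; the new torch.cuda.set_device(device) call
# also makes 'cuda:1' the default device for subsequent CUDA allocations.
model = init_model(config_file, checkpoint_file, device='cuda:1')
result, data = inference_detector(model, 'demo/data/scannet/scene0000_00.bin')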
......@@ -44,7 +44,12 @@ def single_gpu_test(model,
models_3d = (Base3DDetector, Base3DSegmentor,
SingleStageMono3DDetector)
if isinstance(model.module, models_3d):
model.module.show_results(data, result, out_dir=out_dir)
model.module.show_results(
data,
result,
out_dir=out_dir,
show=show,
score_thr=show_score_thr)
# Visualize the results of MMDetection model
# 'show_result' is MMdetection visualization API
else:
......
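A sketch of driving the extended test loop; model and data_loader are assumed to be built elsewhere (e.g. by the standard test script), and the threshold value is arbitrary.

from mmcv.parallel import MMDataParallel
from mmdet3d.apis import single_gpu_test

model = MMDataParallel(model, device_ids=[0])  # wrap the already-built model
outputs = single_gpu_test(
    model,
    data_loader,
    show=True,              # open an open3d window for each visualized sample
    out_dir='./show_dir',   # also dump visualization files here (placeholder path)
    show_score_thr=0.3)     # forwarded to show_results as score_thr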
......@@ -78,7 +78,8 @@ def show_result(points,
out_dir,
filename,
show=False,
snapshot=False):
snapshot=False,
pred_labels=None):
"""Convert results into format that is directly readable for meshlab.
Args:
......@@ -87,10 +88,11 @@ def show_result(points,
pred_bboxes (np.ndarray): Predicted boxes.
out_dir (str): Path of output directory
filename (str): Filename of the current frame.
show (bool, optional): Visualize the results online.
Defaults to False.
show (bool, optional): Visualize the results online. Defaults to False.
snapshot (bool, optional): Whether to save the online results.
Defaults to False.
pred_labels (np.ndarray, optional): Predicted labels of boxes.
Defaults to None.
"""
result_path = osp.join(out_dir, filename)
mmcv.mkdir_or_exist(result_path)
......@@ -100,7 +102,23 @@ def show_result(points,
vis = Visualizer(points)
if pred_bboxes is not None:
vis.add_bboxes(bbox3d=pred_bboxes)
if pred_labels is None:
vis.add_bboxes(bbox3d=pred_bboxes)
else:
palette = np.random.randint(
0, 255, size=(pred_labels.max() + 1, 3)) / 256
labelDict = {}
for j in range(len(pred_labels)):
i = int(pred_labels[j].numpy())
if labelDict.get(i) is None:
labelDict[i] = []
labelDict[i].append(pred_bboxes[j])
for i in labelDict:
vis.add_bboxes(
bbox3d=np.array(labelDict[i]),
bbox_color=palette[i],
points_in_box_color=palette[i])
if gt_bboxes is not None:
vis.add_bboxes(bbox3d=gt_bboxes, bbox_color=(0, 0, 1))
show_path = osp.join(result_path,
......
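A standalone sketch of the new pred_labels path with dummy inputs: when show=True (a display is required), boxes are colored per class by the random palette above, and the .obj dumps are written under out_dir either way. All values below are made up for illustration.

import numpy as np
import torch
from mmdet3d.core.visualizer import show_result

points = np.random.rand(1000, 3)         # dummy point cloud: x, y, z
pred_bboxes = np.random.rand(5, 7)       # dummy depth-mode boxes: x, y, z, dx, dy, dz, yaw
pred_labels = torch.randint(0, 3, (5,))  # per-box class indices (torch tensor, as expected)

show_result(points, None, pred_bboxes, 'work_dirs/vis', 'demo_frame',
            show=True, snapshot=False, pred_labels=pred_labels)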
......@@ -60,13 +60,18 @@ class Base3DDetector(BaseDetector):
else:
return self.forward_test(**kwargs)
def show_results(self, data, result, out_dir):
def show_results(self, data, result, out_dir, show=False, score_thr=None):
"""Results visualization.
Args:
data (list[dict]): Input points and the information of the sample.
result (list[dict]): Prediction results.
out_dir (str): Output directory of visualization result.
show (bool, optional): Whether to visualize the results with open3d.
Defaults to False.
score_thr (float, optional): Score threshold of bounding boxes.
Defaults to None.
"""
for batch_id in range(len(result)):
if isinstance(data['points'][0], DC):
......@@ -93,6 +98,12 @@ class Base3DDetector(BaseDetector):
assert out_dir is not None, 'Expect out_dir, got none.'
pred_bboxes = result[batch_id]['boxes_3d']
pred_labels = result[batch_id]['labels_3d']
if score_thr is not None:
mask = result[batch_id]['scores_3d'] > score_thr
pred_bboxes = pred_bboxes[mask]
pred_labels = pred_labels[mask]
# for now we convert points and bbox into depth mode
if (box_mode_3d == Box3DMode.CAM) or (box_mode_3d
......@@ -105,4 +116,11 @@ class Base3DDetector(BaseDetector):
ValueError(
f'Unsupported box_mode_3d {box_mode_3d} for conversion!')
pred_bboxes = pred_bboxes.tensor.cpu().numpy()
show_result(points, None, pred_bboxes, out_dir, file_name)
show_result(
points,
None,
pred_bboxes,
out_dir,
file_name,
show=show,
pred_labels=pred_labels)
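The score filter added above is just a boolean mask over the per-sample prediction dict; a self-contained sketch with dummy tensors mirroring that logic.

import torch
from mmdet3d.core.bbox import DepthInstance3DBoxes

# Dummy per-sample prediction in the shape show_results receives.
result = dict(
    boxes_3d=DepthInstance3DBoxes(torch.rand(4, 7)),
    scores_3d=torch.tensor([0.9, 0.2, 0.55, 0.05]),
    labels_3d=torch.tensor([0, 1, 2, 1]))

score_thr = 0.3
mask = result['scores_3d'] > score_thr  # keep confident detections only
pred_bboxes = result['boxes_3d'][mask]  # 3D box containers support mask indexing
pred_labels = result['labels_3d'][mask]
print(len(pred_bboxes), pred_labels)    # 2 tensor([0, 2])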
......@@ -179,13 +179,20 @@ class SingleStageMono3DDetector(SingleStageDetector):
return [bbox_list]
def show_results(self, data, result, out_dir):
def show_results(self, data, result, out_dir, show=False, score_thr=None):
"""Results visualization.
Args:
data (list[dict]): Input images and the information of the sample.
result (list[dict]): Prediction results.
out_dir (str): Output directory of visualization result.
show (bool, optional): Whether to visualize the results with open3d.
Defaults to False.
score_thr (float, optional): Score threshold of bounding boxes.
Defaults to None. Not implemented for single-stage mono3d yet
(TODO); kept here to unify the interface.
"""
for batch_id in range(len(result)):
if isinstance(data['img_metas'][0], DC):
......@@ -216,4 +223,4 @@ class SingleStageMono3DDetector(SingleStageDetector):
out_dir,
file_name,
'camera',
show=True)
show=show)
......@@ -72,7 +72,9 @@ class Base3DSegmentor(BaseSegmentor):
result,
palette=None,
out_dir=None,
ignore_index=None):
ignore_index=None,
show=False,
score_thr=None):
"""Results visualization.
Args:
......@@ -85,6 +87,13 @@ class Base3DSegmentor(BaseSegmentor):
ignore_index (int, optional): The label index to be ignored, e.g.
unannotated points. If None is given, set to len(self.CLASSES).
Defaults to None.
show (bool, optional): Whether to visualize the results with open3d.
Defaults to False.
score_thr (float, optional): Score threshold of bounding boxes.
Defaults to None. Not implemented for Base3DSegmentor yet
(TODO); kept here to unify the interface.
"""
assert out_dir is not None, 'Expect out_dir, got none.'
if palette is None:
......@@ -123,4 +132,4 @@ class Base3DSegmentor(BaseSegmentor):
file_name,
palette,
ignore_index,
show=True)
show=show)
# Copyright (c) Open-MMLab. All rights reserved.
__version__ = '0.17.2'
__version__ = '0.17.3'
short_version = __version__
......
docutils==0.16.0
m2r
myst-parser
opencv-python
mistune==0.8.4
-e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme
recommonmark
sphinx==4.0.2
sphinx-copybutton
sphinx_markdown_tables
torch
mmcv-full>=1.3.8,<=1.4.0
mmcv-full>=1.3.8,<=1.5.0
mmdet>=2.14.0,<=3.0.0
mmsegmentation>=0.14.1,<=1.0.0
mmcv
mmdet
mmsegmentation>=0.14.1
torch
torchvision