Commit 8ed3905c authored by zhangwenwei

Merge branch 'visualize' into 'master'

Visualize

See merge request open-mmlab/mmdet.3d!60
parents 5e5b822b 349051ee
@@ -127,3 +127,5 @@ exps/
*.png
/data/scannet/scans/
/data/sunrgbd/OFFICIAL_SUNRGBD/
*.obj
*.ply
@@ -3,6 +3,6 @@ line_length = 79
multi_line_output = 0
known_standard_library = setuptools
known_first_party = mmdet,mmdet3d
known_third_party = cv2,load_scannet_data,matplotlib,mmcv,numba,numpy,nuscenes,plyfile,pycocotools,pyquaternion,pytest,scannet_utils,scipy,seaborn,shapely,skimage,sunrgbd_utils,terminaltables,torch,torchvision,trimesh
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY
@@ -88,9 +88,8 @@ python tools/test.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [-

Optional arguments:
- `RESULT_FILE`: Filename of the output results in pickle format. If not specified, the results will not be saved to a file.
- `EVAL_METRICS`: Items to be evaluated on the results. Allowed values depend on the dataset, e.g., `proposal_fast`, `proposal`, `bbox`, `segm` are available for COCO, `mAP`, `recall` for PASCAL VOC. Cityscapes could be evaluated by `cityscapes` as well as all COCO metrics.
- `--show`: If specified, detection results will be plotted in silent mode, i.e. saved to files rather than shown in a window. It is only applicable to single GPU testing and used for debugging and visualization. This option should be used together with `--show-dir`.
- `--show-dir`: If specified, detection results will be saved as `***_points.obj` and `***_pred.ply` files in the specified directory. It is only applicable to single GPU testing and used for debugging and visualization. You do NOT need a GUI available in your environment for using this option.
- `--show-score-thr`: If specified, detections with score below this threshold will be removed.
Examples:

@@ -157,6 +156,25 @@ You will get two json files `mask_rcnn_test-dev_results.bbox.json` and `mask_rcn
The generated png and txt files will be under the `./mask_rcnn_cityscapes_test_results` directory.
### Visualization
To see the points and detection results of SUNRGBD, ScanNet or KITTI, you can run the following command:
```bash
python tools/test.py ${CONFIG_FILE} ${CKPT_PATH} --show --show-dir ${SHOW_DIR}
```
After running this command, the plotted results are saved as `***_points.obj` and `***_pred.ply` files in `${SHOW_DIR}`.
To see the points, detection results and ground truth of SUNRGBD, ScanNet or KITTI during evaluation, you can run the following command:
```bash
python tools/test.py ${CONFIG_FILE} ${CKPT_PATH} --eval 'mAP' --options "show=True" "out_dir=${SHOW_DIR}"
```
After running this command, you will obtain `***_points.obj`, `***_pred.ply` and `***_gt.ply` files in `${SHOW_DIR}`.

You can use 3D visualization software such as [MeshLab](http://www.meshlab.net/) to open these files under `${SHOW_DIR}` and see the 3D detection output. Specifically, open `***_points.obj` to see the input point cloud and open `***_pred.ply` to see the predicted 3D bounding boxes. This allows inference and result generation to be done on a remote server, so users can open the exported files on their local machine with a GUI.
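If you would rather inspect the exported files programmatically than in MeshLab, a minimal sketch using `trimesh` (which this MR already adds as a dependency) could look like the following; the file name is illustrative, and a vertex-only `.obj` may load as a `PointCloud` object depending on your trimesh version:

```python
import trimesh

# illustrative file names under ${SHOW_DIR}
points = trimesh.load('show_dir/000008_points.obj')
pred_boxes = trimesh.load('show_dir/000008_pred.ply')

print(points.vertices.shape)  # (N, 3): the input point cloud
print(pred_boxes.bounds)      # overall extent of the predicted boxes
```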
**Notice**: The visualization API is a little unstable since we plan to refactor these parts together with MMDetection in the future.
### Image demo
from .test import single_gpu_test
__all__ = ['single_gpu_test']
import mmcv
import torch
def single_gpu_test(model, data_loader, show=False, out_dir=None):
    """Test a model with a single GPU, optionally saving visualizations."""
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        if show:
            # delegate plotting to the detector, which writes the
            # *_points.obj / *_pred.ply files into out_dir
            model.module.show_results(data, result, out_dir)
        results.append(result)

        batch_size = len(data['img_metas'][0].data)
        for _ in range(batch_size):
            prog_bar.update()
    return results
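For context, a hedged sketch of how `single_gpu_test` is typically driven (mirroring `tools/test.py` at the bottom of this diff; config and checkpoint paths are illustrative):

```python
from mmcv import Config
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint

from mmdet3d.apis import single_gpu_test
from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_detector

cfg = Config.fromfile('configs/some_config.py')  # illustrative path
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
    dataset, samples_per_gpu=1, workers_per_gpu=1, dist=False, shuffle=False)

model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
load_checkpoint(model, 'checkpoint.pth')  # illustrative path
model = MMDataParallel(model, device_ids=[0])

# writes *_points.obj / *_pred.ply into ./show_dir while testing
outputs = single_gpu_test(model, data_loader, show=True, out_dir='./show_dir')
```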
@@ -2,4 +2,5 @@ from .anchor import * # noqa: F401, F403
from .bbox import *  # noqa: F401, F403
from .evaluation import *  # noqa: F401, F403
from .post_processing import *  # noqa: F401, F403
from .visualizer import * # noqa: F401, F403
from .voxel import *  # noqa: F401, F403
from .show_result import show_result
__all__ = ['show_result']
import os.path as osp
import mmcv
import numpy as np
import trimesh
def _write_ply(points, out_filename):
    """Write points as an OBJ-style vertex list, one 'v x y z [r g b]' line
    per point."""
    N = points.shape[0]
    with open(out_filename, 'w') as fout:
        for i in range(N):
            if points.shape[1] == 6:
                # xyz + rgb
                c = points[i, 3:].astype(int)
                fout.write(
                    'v %f %f %f %d %d %d\n' %
                    (points[i, 0], points[i, 1], points[i, 2], c[0], c[1],
                     c[2]))
            else:
                # xyz only
                fout.write('v %f %f %f\n' %
                           (points[i, 0], points[i, 1], points[i, 2]))
def _write_oriented_bbox(scene_bbox, out_filename):
"""Export oriented (around Z axis) scene bbox to meshes
Args:
scene_bbox(list[ndarray] or ndarray): xyz pos of center and
3 lengths (dx,dy,dz) and heading angle around Z axis.
Y forward, X right, Z upward. heading angle of positive X is 0,
heading angle of positive Y is 90 degrees.
out_filename(str): Filename.
"""
    def heading2rotmat(heading_angle):
        # rotation matrix about the Z axis by heading_angle
        rotmat = np.zeros((3, 3))
        rotmat[2, 2] = 1
        cosval = np.cos(heading_angle)
        sinval = np.sin(heading_angle)
        rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
        return rotmat
    def convert_oriented_box_to_trimesh_fmt(box):
        ctr = box[:3]
        lengths = box[3:6]
        # 4x4 homogeneous transform: rotation from the heading angle,
        # translation to the box center
        trns = np.eye(4)
        trns[0:3, 3] = ctr
        trns[0:3, 0:3] = heading2rotmat(box[6])
        box_trimesh_fmt = trimesh.creation.box(lengths, trns)
        return box_trimesh_fmt
    if len(scene_bbox) == 0:
        # export a single degenerate box so the output file is still valid
        scene_bbox = np.zeros((1, 7))
    scene = trimesh.scene.Scene()
    for box in scene_bbox:
        scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))

    mesh_list = trimesh.util.concatenate(scene.dump())
    # save to ply file
    trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
def show_result(points, gt_bboxes, pred_bboxes, out_dir, filename):
    """Export points, GT boxes and predicted boxes to ``out_dir``.

    ``points`` has shape (N, 3) or (N, 6); the boxes have shape (M, 7)
    with (x, y, z, dx, dy, dz, yaw) in depth mode. Any of them may be None.
    """
    mmcv.mkdir_or_exist(out_dir)
    if gt_bboxes is not None:
        # negate the heading angle to match the rotation convention
        # of _write_oriented_bbox
        gt_bboxes[:, -1] *= -1
        _write_oriented_bbox(gt_bboxes, osp.join(out_dir,
                                                 f'{filename}_gt.ply'))
    if points is not None:
        _write_ply(points, osp.join(out_dir, f'{filename}_points.obj'))
    if pred_bboxes is not None:
        pred_bboxes[:, -1] *= -1
        _write_oriented_bbox(pred_bboxes,
                             osp.join(out_dir, f'{filename}_pred.ply'))
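As a quick sanity check, `show_result` can be driven with synthetic data; all values below are made up for illustration:

```python
import numpy as np

from mmdet3d.core import show_result

# 100 random colored points (x, y, z, r, g, b) and one dummy box
# (x, y, z, dx, dy, dz, yaw) in depth mode; purely illustrative values
points = np.random.rand(100, 6).astype(np.float32)
points[:, 3:] *= 255
pred_bboxes = np.array([[0.5, 0.5, 0.5, 1.0, 1.0, 1.0, 0.0]])

show_result(points, None, pred_bboxes, './show_dir', 'demo')
# -> ./show_dir/demo_points.obj and ./show_dir/demo_pred.ply
```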
@@ -168,7 +168,13 @@ class Custom3DDataset(Dataset):
            mmcv.dump(outputs, out)
        return outputs, tmp_dir

    def evaluate(self,
                 results,
                 metric=None,
                 iou_thr=(0.25, 0.5),
                 logger=None,
                 show=False,
                 out_dir=None):
        """Evaluate.

        Evaluation in indoor protocol.

@@ -177,7 +183,13 @@ class Custom3DDataset(Dataset):
            results (list[dict]): List of results.
            metric (str | list[str]): Metrics to be evaluated.
            iou_thr (list[float]): AP IoU thresholds.
            show (bool): Whether to visualize the results.
                Default: False.
            out_dir (str): Path to save the visualization results.
                Default: None.

        Returns:
            dict: Evaluation results.
        """
        from mmdet3d.core.evaluation import indoor_eval
        assert isinstance(
@@ -197,6 +209,8 @@ class Custom3DDataset(Dataset):
            logger=logger,
            box_type_3d=self.box_type_3d,
            box_mode_3d=self.box_mode_3d)
        if show:
            self.show(results, out_dir)
        return ret_dict
@@ -9,6 +9,7 @@ import torch
from mmcv.utils import print_log

from mmdet.datasets import DATASETS
from ..core import show_result
from ..core.bbox import Box3DMode, CameraInstance3DBoxes, points_cam2img
from .custom_3d import Custom3DDataset

@@ -181,7 +182,9 @@ class KittiDataset(Custom3DDataset):
                 metric=None,
                 logger=None,
                 pklfile_prefix=None,
                 submission_prefix=None,
                 show=False,
                 out_dir=None):
        """Evaluation in KITTI protocol.

        Args:
@@ -194,6 +197,10 @@ class KittiDataset(Custom3DDataset):
                If not specified, a temp file will be created. Default: None.
            submission_prefix (str | None): The prefix of submission data.
                If not specified, the submission data will not be generated.
            show (bool): Whether to visualize the results.
                Default: False.
            out_dir (str): Path to save the visualization results.
                Default: None.

        Returns:
            dict[str: float]: Results of each evaluation metric.

@@ -230,6 +237,8 @@ class KittiDataset(Custom3DDataset):
        if tmp_dir is not None:
            tmp_dir.cleanup()

        if show:
            self.show(results, out_dir)
        return ap_dict

    def bbox2result_kitti(self,
@@ -508,3 +517,26 @@ class KittiDataset(Custom3DDataset):
                label_preds=np.zeros([0, 4]),
                sample_idx=sample_idx,
            )
    def show(self, results, out_dir):
        """Export results for visual inspection in ``out_dir``."""
        assert out_dir is not None, 'Expect out_dir, got none.'
        for i, result in enumerate(results):
            data_info = self.data_infos[i]
            pts_path = data_info['point_cloud']['velodyne_path']
            file_name = osp.split(pts_path)[-1].split('.')[0]
            points = np.fromfile(
                osp.join(self.root_split, 'velodyne_reduced',
                         f'{file_name}.bin'),
                dtype=np.float32).reshape(-1, 4)
            # convert LiDAR coordinates (x forward, y left) to depth
            # coordinates (x right, y forward) for visualization
            points = points[..., [1, 0, 2]]
            points[..., 0] *= -1
            gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor
            gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR,
                                          Box3DMode.DEPTH)
            # shift the box origin from the bottom center to the
            # geometric center
            gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2
            pred_bboxes = result['boxes_3d'].tensor.numpy()
            pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR,
                                            Box3DMode.DEPTH)
            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
            show_result(points, gt_bboxes, pred_bboxes, out_dir, file_name)
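The LiDAR-to-depth axis handling in `show()` is easy to verify in isolation with plain NumPy (illustrative values):

```python
import numpy as np

# one LiDAR point 2 m forward, 1 m to the left (x forward, y left, z up)
pt_lidar = np.array([[2.0, 1.0, 0.0]])

# the same swap-and-flip as in show(); depth mode is x right, y forward, z up
pt_depth = pt_lidar[..., [1, 0, 2]]
pt_depth[..., 0] *= -1

print(pt_depth)  # [[-1. 2. 0.]]: still 1 m left of and 2 m ahead of the sensor
```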
@@ -7,7 +7,8 @@ import pyquaternion
from nuscenes.utils.data_classes import Box as NuScenesBox

from mmdet.datasets import DATASETS
from ..core import show_result
from ..core.bbox import Box3DMode, LiDARInstance3DBoxes
from .custom_3d import Custom3DDataset

@@ -324,7 +325,9 @@ class NuScenesDataset(Custom3DDataset):
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 result_names=['pts_bbox'],
                 show=False,
                 out_dir=None):
        """Evaluation in nuScenes protocol.

        Args:
@@ -335,6 +338,10 @@ class NuScenesDataset(Custom3DDataset):
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            show (bool): Whether to visualize the results.
                Default: False.
            out_dir (str): Path to save the visualization results.
                Default: None.

        Returns:
            dict[str: float]

@@ -352,8 +359,30 @@ class NuScenesDataset(Custom3DDataset):
        if tmp_dir is not None:
            tmp_dir.cleanup()

        if show:
            self.show(results, out_dir)
        return results_dict
    def show(self, results, out_dir):
        """Export results for visual inspection in ``out_dir``."""
        assert out_dir is not None, 'Expect out_dir, got none.'
        for i, result in enumerate(results):
            data_info = self.data_infos[i]
            pts_path = data_info['lidar_path']
            file_name = osp.split(pts_path)[-1].split('.')[0]
            # nuScenes LiDAR bins store 5 values per point
            # (x, y, z, intensity, ring index)
            points = np.fromfile(pts_path, dtype=np.float32).reshape(-1, 5)
            # convert LiDAR coordinates to depth coordinates, as in
            # KittiDataset.show()
            points = points[..., [1, 0, 2]]
            points[..., 0] *= -1
            gt_bboxes = self.get_ann_info(i)['gt_bboxes_3d'].tensor
            gt_bboxes = Box3DMode.convert(gt_bboxes, Box3DMode.LIDAR,
                                          Box3DMode.DEPTH)
            gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2
            pred_bboxes = result['boxes_3d'].tensor.numpy()
            pred_bboxes = Box3DMode.convert(pred_bboxes, Box3DMode.LIDAR,
                                            Box3DMode.DEPTH)
            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
            show_result(points, gt_bboxes, pred_bboxes, out_dir, file_name)
def output_to_nusc_box(detection):
    box3d = detection['boxes_3d']
@@ -78,7 +78,8 @@ class Collect3D(object):
                            'pcd_horizontal_flip', 'pcd_vertical_flip',
                            'box_mode_3d', 'box_type_3d', 'img_norm_cfg',
                            'rect', 'Trv2c', 'P2', 'pcd_trans', 'sample_idx',
                            'pcd_scale_factor', 'pcd_rotation',
                            'pts_filename')):
        self.keys = keys
        self.meta_keys = meta_keys
@@ -2,6 +2,7 @@ import os.path as osp

import numpy as np

from mmdet3d.core import show_result
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet.datasets import DATASETS
from .custom_3d import Custom3DDataset

@@ -63,3 +64,18 @@ class ScanNetDataset(Custom3DDataset):
            pts_instance_mask_path=pts_instance_mask_path,
            pts_semantic_mask_path=pts_semantic_mask_path)
        return anns_results
    def show(self, results, out_dir):
        """Export results for visual inspection in ``out_dir``."""
        assert out_dir is not None, 'Expect out_dir, got none.'
        for i, result in enumerate(results):
            data_info = self.data_infos[i]
            pts_path = data_info['pts_path']
            file_name = osp.split(pts_path)[-1].split('.')[0]
            points = np.fromfile(
                osp.join(self.data_root, pts_path),
                dtype=np.float32).reshape(-1, 6)
            # ScanNet GT boxes are axis-aligned (x, y, z, dx, dy, dz);
            # pad a zero yaw angle to match the 7-value box format
            gt_bboxes = np.pad(data_info['annos']['gt_boxes_upright_depth'],
                               ((0, 0), (0, 1)), 'constant')
            pred_bboxes = result['boxes_3d'].tensor.numpy()
            # shift the box origin from the bottom center to the
            # geometric center
            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
            show_result(points, gt_bboxes, pred_bboxes, out_dir, file_name)
import os.path as osp
import numpy as np

from mmdet3d.core import show_result
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet.datasets import DATASETS
from .custom_3d import Custom3DDataset

@@ -48,3 +51,21 @@ class SUNRGBDDataset(Custom3DDataset):
        anns_results = dict(
            gt_bboxes_3d=gt_bboxes_3d, gt_labels_3d=gt_labels_3d)
        return anns_results
    def show(self, results, out_dir):
        """Export results for visual inspection in ``out_dir``."""
        assert out_dir is not None, 'Expect out_dir, got none.'
        for i, result in enumerate(results):
            data_info = self.data_infos[i]
            pts_path = data_info['pts_path']
            file_name = osp.split(pts_path)[-1].split('.')[0]
            points = np.fromfile(
                osp.join(self.data_root, pts_path),
                dtype=np.float32).reshape(-1, 6)
            # scale the point colors to the 0-255 range used by the exporter
            points[:, 3:] *= 255
            if data_info['annos']['gt_num'] > 0:
                gt_bboxes = data_info['annos']['gt_boxes_upright_depth']
            else:
                gt_bboxes = np.zeros((0, 7))
            pred_bboxes = result['boxes_3d'].tensor.numpy()
            # shift the box origin from the bottom center to the
            # geometric center
            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
            show_result(points, gt_bboxes, pred_bboxes, out_dir, file_name)
import os.path as osp
from mmdet3d.core import Box3DMode, show_result
from mmdet.models.detectors import BaseDetector

@@ -52,3 +55,23 @@ class Base3DDetector(BaseDetector):
            return self.forward_train(**kwargs)
        else:
            return self.forward_test(**kwargs)

    def show_results(self, data, result, out_dir):
        """Visualize the results of a single sample during testing."""
        points = data['points'][0]._data[0][0].numpy()
        pts_filename = data['img_metas'][0]._data[0][0]['pts_filename']
        file_name = osp.split(pts_filename)[-1].split('.')[0]
        assert out_dir is not None, 'Expect out_dir, got none.'

        pred_bboxes = result['pts_bbox']['boxes_3d'].tensor.numpy()
        # for now we convert points and boxes into depth mode
        if data['img_metas'][0]._data[0][0]['box_mode_3d'] != Box3DMode.DEPTH:
            points = points[..., [1, 0, 2]]
            points[..., 0] *= -1
            pred_bboxes = Box3DMode.convert(
                pred_bboxes, data['img_metas'][0]._data[0][0]['box_mode_3d'],
                Box3DMode.DEPTH)
        # shift the box origin from the bottom center to the geometric center
        pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
        show_result(points, None, pred_bboxes, out_dir, file_name)
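The box conversion used above can also be exercised on its own; a small sketch with a made-up LiDAR-mode box (assuming, as in this codebase, that `Box3DMode.convert` accepts a plain ndarray):

```python
import numpy as np

from mmdet3d.core import Box3DMode

# one LiDAR-mode box (x, y, z, dx, dy, dz, yaw); illustrative values
box_lidar = np.array([[10.0, 2.0, -1.0, 4.0, 1.8, 1.6, 0.3]])
box_depth = Box3DMode.convert(box_lidar, Box3DMode.LIDAR, Box3DMode.DEPTH)

# as in show_results(): move z from the bottom face to the box center
box_depth[..., 2] += box_depth[..., 5] / 2
print(box_depth)
```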
import os.path as osp
import torch
import torch.nn as nn
import torch.nn.functional as F

from mmdet3d.core import (Box3DMode, bbox3d2result, merge_aug_bboxes_3d,
                          show_result)
from mmdet3d.ops import Voxelization
from mmdet.core import multi_apply
from mmdet.models import DETECTORS

@@ -349,3 +352,23 @@ class MVXTwoStageDetector(Base3DDetector):
        merged_bboxes = merge_aug_bboxes_3d(aug_bboxes, img_metas,
                                            self.pts_bbox_head.test_cfg)
        return merged_bboxes
    def show_results(self, data, result, out_dir):
        """Visualize the results of a single sample during testing."""
        points = data['points'][0]._data[0][0].numpy()
        pts_filename = data['img_metas'][0]._data[0][0]['pts_filename']
        file_name = osp.split(pts_filename)[-1].split('.')[0]
        assert out_dir is not None, 'Expect out_dir, got none.'

        # only keep boxes scoring above 0.1 to reduce clutter
        inds = result['pts_bbox']['scores_3d'] > 0.1
        pred_bboxes = result['pts_bbox']['boxes_3d'][inds].tensor.numpy()
        # for now we convert points and boxes into depth mode
        if data['img_metas'][0]._data[0][0]['box_mode_3d'] != Box3DMode.DEPTH:
            points = points[..., [1, 0, 2]]
            points[..., 0] *= -1
            pred_bboxes = Box3DMode.convert(
                pred_bboxes, data['img_metas'][0]._data[0][0]['box_mode_3d'],
                Box3DMode.DEPTH)
        # shift the box origin from the bottom center to the geometric center
        pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
        show_result(points, None, pred_bboxes, out_dir, file_name)
networkx>=2.2,<2.3
trimesh>=2.35.39,<2.35.40
matplotlib
mmcv>=0.6.0
numba==0.48.0
@@ -5,6 +7,7 @@ numpy
nuscenes-devkit==1.0.5
# need older pillow until torchvision is fixed
Pillow<=6.2.2
plyfile
six
terminaltables
torch>=1.3
@@ -8,9 +8,10 @@ from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import get_dist_info, init_dist, load_checkpoint
from tools.fuse_conv_bn import fuse_module

from mmdet3d.apis import single_gpu_test
from mmdet3d.datasets import build_dataloader, build_dataset
from mmdet3d.models import build_detector
from mmdet.apis import multi_gpu_test, set_random_seed
from mmdet.core import wrap_fp16_model
@@ -38,6 +39,8 @@ def parse_args():
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where results will be saved')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
@@ -125,7 +128,7 @@ def main():
    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
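Putting it together, a typical invocation of the updated script (config and checkpoint paths are illustrative):

```bash
python tools/test.py configs/votenet/votenet_8x8_scannet-3d-18class.py \
    checkpoints/votenet_scannet.pth --show --show-dir ./show_results
```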