Commit c9ad3605 authored by jshilong's avatar jshilong Committed by ChaimZhu
Browse files

[Refactor]New version VoteNet

parent db44cc50
import unittest
from io import StringIO
from unittest.mock import patch
import numpy as np
import torch
from mmdet3d.core import DepthInstance3DBoxes
from mmdet3d.metrics import IndoorMetric
class TestIndoorMetric(unittest.TestCase):

    @patch('sys.stdout', new_callable=StringIO)
    def test_process(self, stdout):
        """Feed the ground truth back as perfect predictions.

        With predictions identical to the annotations, every mAP value
        reported by ``IndoorMetric`` must be exactly 1.
        """
        metric = IndoorMetric()
        gt_boxes = DepthInstance3DBoxes(
            torch.tensor([
                [2.3578, 1.7841, -0.0987, 0.5532, 0.4948, 0.6474, 0.0000],
                [-0.2773, -2.1403, 0.0615, 0.4786, 0.5170, 0.3842, 0.0000],
                [0.0259, -2.7954, -0.0157, 0.3869, 0.4361, 0.5229, 0.0000],
                [-2.3968, 1.1040, 0.0945, 2.5563, 1.5989, 0.9322, 0.0000],
                [-0.3173, -2.7770, -0.0134, 0.5473, 0.8569, 0.5577, 0.0000],
                [-2.4882, -1.4437, 0.0987, 1.2199, 0.4859, 0.6461, 0.0000],
                [-3.4702, -0.1315, 0.2463, 1.3137, 0.8022, 0.4765, 0.0000],
                [1.9786, 3.0196, -0.0934, 1.6129, 0.5834, 1.4662, 0.0000],
                [2.3835, 2.2691, -0.1376, 0.5197, 0.5099, 0.6896, 0.0000],
                [2.5986, -0.5313, 1.4269, 0.0696, 0.2933, 0.3104, 0.0000],
                [0.4555, -3.1278, -0.0637, 2.0247, 0.1292, 0.2419, 0.0000],
                [0.4655, -3.1941, 0.3769, 2.1132, 0.3536, 1.9803, 0.0000]
            ]))
        gt_labels = np.array([2, 2, 2, 3, 4, 17, 4, 7, 2, 8, 17, 11])
        eval_ann_info = {
            'gt_bboxes_3d': gt_boxes,
            'gt_labels_3d': gt_labels,
        }
        dummy_batch = dict(data_sample=dict(eval_ann_info=eval_ann_info))
        # Predictions are the annotations themselves with full confidence.
        pred_instances_3d = dict(
            scores_3d=torch.ones(len(gt_boxes)),
            bboxes_3d=gt_boxes,
            labels_3d=torch.Tensor(gt_labels))
        pred_dict = dict(pred_instances_3d=pred_instances_3d)
        metric.dataset_meta = {
            'CLASSES': ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
                        'window', 'bookshelf', 'picture', 'counter', 'desk',
                        'curtain', 'refrigerator', 'showercurtrain', 'toilet',
                        'sink', 'bathtub', 'garbagebin'),
            'box_type_3d':
            'Depth',
        }
        metric.process([dummy_batch], [pred_dict])
        eval_results = metric.evaluate(1)
        for v in eval_results.values():
            # Perfect predictions -> every metric value is 1 (map == 1).
            self.assertEqual(1, v)
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import random
from os.path import dirname, exists, join
import numpy as np
import pytest
import torch
from mmengine.data import InstanceData
......@@ -11,67 +6,7 @@ from mmengine.data import InstanceData
from mmdet3d.core import Det3DDataSample
from mmdet3d.core.bbox import LiDARInstance3DBoxes
from mmdet3d.registry import MODELS
def _setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def _get_config_directory():
    """Find the predefined detector config directory.

    Returns:
        str: Absolute path of the repo-level ``configs`` directory.

    Raises:
        Exception: If no ``configs`` directory can be located.
    """
    try:
        # Assume we run from a source checkout of mmdetection3d.
        repo_dpath = dirname(dirname(dirname(__file__)))
    except NameError:
        # ``__file__`` is undefined in interactive (IPython) sessions;
        # fall back to the installed package location.
        import mmdet3d
        repo_dpath = dirname(dirname(mmdet3d.__file__))
    config_dpath = join(repo_dpath, 'configs')
    if not exists(config_dpath):
        raise Exception('Cannot find config path')
    return config_dpath
def _get_config_module(fname):
    """Load the config file ``fname`` as an mmcv ``Config`` object.

    Args:
        fname (str): Config path relative to the ``configs`` directory.
    """
    from mmcv import Config
    config_fpath = join(_get_config_directory(), fname)
    return Config.fromfile(config_fpath)
def _get_model_cfg(fname):
    """Grab configs necessary to create a model.

    The model config is deep copied so each test can mutate parameters
    without influencing other tests.
    """
    return copy.deepcopy(_get_config_module(fname).model)
def _get_detector_cfg(fname):
    """Grab configs necessary to create a detector.

    Returns a deep copy of the model config with ``train_cfg`` and
    ``test_cfg`` attached, so tests can modify parameters safely.
    """
    import mmcv
    cfg = _get_config_module(fname)
    detector = copy.deepcopy(cfg.model)
    detector.update(
        train_cfg=mmcv.Config(copy.deepcopy(cfg.model.train_cfg)),
        test_cfg=mmcv.Config(copy.deepcopy(cfg.model.test_cfg)))
    return detector
from tests.utils.model_utils import _get_detector_cfg, _setup_seed
def test_voxel_net():
......@@ -136,4 +71,43 @@ def test_voxel_net():
dict(inputs=input_dict0, data_sample=data_sample_0),
dict(inputs=input_dict1, data_sample=data_sample_1)
]
results = model.forward(data, return_loss=False)
model.forward(data, return_loss=False)
def test_sassd():
    """Smoke-test SASSD training and inference forwards on random inputs.

    Requires CUDA; skipped otherwise. Checks that all training losses are
    non-negative and that ``simple_test`` returns 50 boxes per sample.
    """
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    _setup_seed(0)
    sassd_cfg = _get_detector_cfg('sassd/sassd_6x8_80e_kitti-3d-3class.py')
    # `build_detector` is not imported anywhere in this file; build through
    # the MODELS registry instead, consistent with the other tests here.
    # Also bind to `model` rather than a misleading module-level `self`.
    model = MODELS.build(sassd_cfg).cuda()
    points_0 = torch.rand([2010, 4], device='cuda')
    points_1 = torch.rand([2020, 4], device='cuda')
    points = [points_0, points_1]
    gt_bbox_0 = LiDARInstance3DBoxes(torch.rand([10, 7], device='cuda'))
    gt_bbox_1 = LiDARInstance3DBoxes(torch.rand([10, 7], device='cuda'))
    gt_bboxes = [gt_bbox_0, gt_bbox_1]
    gt_labels_0 = torch.randint(0, 3, [10], device='cuda')
    gt_labels_1 = torch.randint(0, 3, [10], device='cuda')
    gt_labels = [gt_labels_0, gt_labels_1]
    img_meta_0 = dict(box_type_3d=LiDARInstance3DBoxes)
    img_meta_1 = dict(box_type_3d=LiDARInstance3DBoxes)
    img_metas = [img_meta_0, img_meta_1]

    # test forward_train
    losses = model.forward_train(points, img_metas, gt_bboxes, gt_labels)
    assert losses['loss_cls'][0] >= 0
    assert losses['loss_bbox'][0] >= 0
    assert losses['loss_dir'][0] >= 0
    assert losses['aux_loss_cls'][0] >= 0
    assert losses['aux_loss_reg'][0] >= 0

    # test simple_test
    with torch.no_grad():
        results = model.simple_test(points, img_metas)
    boxes_3d = results[0]['boxes_3d']
    scores_3d = results[0]['scores_3d']
    labels_3d = results[0]['labels_3d']
    assert boxes_3d.tensor.shape == (50, 7)
    assert scores_3d.shape == torch.Size([50])
    assert labels_3d.shape == torch.Size([50])
import unittest
import torch
from mmengine import DefaultScope
from mmdet3d.core import LiDARInstance3DBoxes
from mmdet3d.registry import MODELS
from tests.utils.model_utils import (_create_detector_inputs,
_get_detector_cfg, _setup_seed)
class TestVotenet(unittest.TestCase):

    def test_voxel_net(self):
        """Run VoteNet 'predict' and 'loss' forward passes on dummy data.

        Only exercises the GPU path; on CPU-only machines the forward
        section is skipped silently (the build itself is still checked).
        """
        import mmdet3d.models

        assert hasattr(mmdet3d.models, 'VoteNet')
        DefaultScope.get_instance('test_vote_net', scope_name='mmdet3d')
        _setup_seed(0)
        voxel_net_cfg = _get_detector_cfg(
            'votenet/votenet_16x8_sunrgbd-3d-10class.py')
        model = MODELS.build(voxel_net_cfg)

        num_gt_instance = 50
        data = [_create_detector_inputs(num_gt_instance=num_gt_instance)]
        aug_data = [
            _create_detector_inputs(num_gt_instance=num_gt_instance),
            _create_detector_inputs(num_gt_instance=num_gt_instance + 1)
        ]
        # test_aug_test — metainfo the augmented samples need.
        metainfo = {
            'pcd_scale_factor': 1,
            'pcd_horizontal_flip': 1,
            'pcd_vertical_flip': 1,
            'box_type_3d': LiDARInstance3DBoxes
        }
        for item in aug_data:
            item['data_sample'].set_metainfo(metainfo)

        if torch.cuda.is_available():
            model = model.cuda()
            # test simple_test
            with torch.no_grad():
                batch_inputs, data_samples = model.data_preprocessor(
                    data, True)
                results = model.forward(
                    batch_inputs, data_samples, mode='predict')
                self.assertEqual(len(results), len(data))
                self.assertIn('bboxes_3d', results[0].pred_instances_3d)
                self.assertIn('scores_3d', results[0].pred_instances_3d)
                self.assertIn('labels_3d', results[0].pred_instances_3d)
                batch_inputs, data_samples = model.data_preprocessor(
                    aug_data, True)
                aug_results = model.forward(
                    batch_inputs, data_samples, mode='predict')
                self.assertIn('bboxes_3d', aug_results[0].pred_instances_3d)
                self.assertIn('scores_3d', aug_results[0].pred_instances_3d)
                self.assertIn('labels_3d', aug_results[0].pred_instances_3d)

            losses = model.forward(batch_inputs, data_samples, mode='loss')
            self.assertGreater(losses['vote_loss'], 0)
            self.assertGreater(losses['objectness_loss'], 0)
            self.assertGreater(losses['semantic_loss'], 0)
            self.assertGreater(losses['dir_res_loss'], 0)
            self.assertGreater(losses['size_class_loss'], 0)
            # NOTE: the original asserted `size_res_loss` twice; the
            # duplicate assertion was removed.
            self.assertGreater(losses['size_res_loss'], 0)
            # TODO test_aug_test
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet3d.core import LiDARInstance3DBoxes
# create a dummy `results` to test the pipeline
from mmdet3d.datasets import LoadAnnotations3D, LoadPointsFromFile
def create_dummy_data_info(with_ann=True):
    """Create a dummy KITTI-style data info dict for pipeline tests.

    The dict mimics one parsed KITTI sample and points at the small test
    assets under ``tests/data/kitti``.

    Args:
        with_ann (bool): Whether to attach the ``ann_info`` block.
            Defaults to True.

    Returns:
        dict: The dummy data info.
    """
    # Annotation block describing a single ground-truth instance.
    ann_info = {
        'gt_bboxes':
        np.array([[712.4, 143., 810.73, 307.92]]),
        'gt_labels':
        np.array([1]),
        'gt_bboxes_3d':
        LiDARInstance3DBoxes(
            np.array(
                [[8.7314, -1.8559, -1.5997, 1.2000, 0.4800, 1.8900,
                  -1.5808]])),
        'gt_labels_3d':
        np.array([1]),
        'num_lidar_pts':
        np.array([377]),
        'difficulty':
        np.array([0]),
        'truncated':
        np.array([0]),
        'occluded':
        np.array([0]),
        'alpha':
        np.array([-0.2]),
        'score':
        np.array([0.]),
        'index':
        np.array([0]),
        'group_id':
        np.array([0])
    }
    data_info = {
        'sample_id':
        0,
        # Per-camera calibration; only CAM2 carries an actual test image.
        'images': {
            'CAM0': {
                'cam2img': [[707.0493, 0.0, 604.0814, 0.0],
                            [0.0, 707.0493, 180.5066, 0.0],
                            [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
            },
            'CAM1': {
                'cam2img': [[707.0493, 0.0, 604.0814, -379.7842],
                            [0.0, 707.0493, 180.5066, 0.0],
                            [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0]]
            },
            'CAM2': {
                'img_path':
                'tests/data/kitti/training/image_2/000000.png',
                'height':
                370,
                'width':
                1224,
                'cam2img': [[707.0493, 0.0, 604.0814, 45.75831],
                            [0.0, 707.0493, 180.5066, -0.3454157],
                            [0.0, 0.0, 1.0, 0.004981016],
                            [0.0, 0.0, 0.0, 1.0]]
            },
            'CAM3': {
                'cam2img': [[707.0493, 0.0, 604.0814, -334.1081],
                            [0.0, 707.0493, 180.5066, 2.33066],
                            [0.0, 0.0, 1.0, 0.003201153],
                            [0.0, 0.0, 0.0, 1.0]]
            },
            'R0_rect': [[
                0.9999127984046936, 0.010092630051076412,
                -0.008511931635439396, 0.0
            ],
                        [
                            -0.010127290152013302, 0.9999405741691589,
                            -0.004037670791149139, 0.0
                        ],
                        [
                            0.008470674976706505, 0.0041235219687223434,
                            0.9999555945396423, 0.0
                        ], [0.0, 0.0, 0.0, 1.0]]
        },
        # Point cloud metadata and lidar<->camera/IMU transforms.
        'lidar_points': {
            'num_pts_feats':
            4,
            'lidar_path':
            'tests/data/kitti/training/velodyne_reduced/000000.bin',
            'lidar2cam': [[
                -0.0015960992313921452, -0.9999162554740906,
                -0.012840436771512032, -0.022366708144545555
            ],
                          [
                              -0.00527064548805356, 0.012848696671426296,
                              -0.9999035596847534, -0.05967890843749046
                          ],
                          [
                              0.9999848008155823, -0.0015282672829926014,
                              -0.005290712229907513, -0.33254900574684143
                          ], [0.0, 0.0, 0.0, 1.0]],
            'Tr_velo_to_cam': [[
                0.006927963811904192, -0.9999722242355347, -0.0027578289154917,
                -0.024577289819717407
            ],
                               [
                                   -0.0011629819637164474,
                                   0.0027498360723257065, -0.9999955296516418,
                                   -0.06127237156033516
                               ],
                               [
                                   0.999975323677063, 0.006931141018867493,
                                   -0.0011438990477472544, -0.33210289478302
                               ], [0.0, 0.0, 0.0, 1.0]],
            'Tr_imu_to_velo': [[
                0.999997615814209, 0.0007553070900030434,
                -0.002035825978964567, -0.8086758852005005
            ],
                               [
                                   -0.0007854027207940817, 0.9998897910118103,
                                   -0.014822980388998985, 0.3195559084415436
                               ],
                               [
                                   0.002024406101554632, 0.014824540354311466,
                                   0.9998881220817566, -0.7997230887413025
                               ], [0.0, 0.0, 0.0, 1.0]]
        },
        # Raw per-instance fields as parsed from the KITTI label file.
        'instances': [{
            'bbox': [712.4, 143.0, 810.73, 307.92],
            'bbox_label':
            -1,
            'bbox_3d': [
                1.840000033378601, 1.4700000286102295, 8.40999984741211,
                1.2000000476837158, 1.8899999856948853, 0.47999998927116394,
                0.009999999776482582
            ],
            'bbox_label_3d':
            -1,
            'num_lidar_pts':
            377,
            'difficulty':
            0,
            'truncated':
            0,
            'occluded':
            0,
            'alpha':
            -0.2,
            'score':
            0.0,
            'index':
            0,
            'group_id':
            0
        }],
        'plane':
        None
    }
    if with_ann:
        data_info['ann_info'] = ann_info
    return data_info
def create_data_info_after_loading():
    """Return the dummy data info after running the loading transforms.

    Applies ``LoadPointsFromFile`` followed by ``LoadAnnotations3D`` to
    the dict produced by :func:`create_dummy_data_info`.
    """
    info = create_dummy_data_info()
    points_loader = LoadPointsFromFile(
        coord_type='LIDAR', load_dim=4, use_dim=3)
    anns_loader = LoadAnnotations3D(with_bbox_3d=True, with_label_3d=True)
    return anns_loader(points_loader(info))
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import random
from os.path import dirname, exists, join
import numpy as np
import torch
from mmengine import InstanceData
from mmdet3d.core import Det3DDataSample, LiDARInstance3DBoxes
def _setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
def _get_config_directory():
    """Locate the repo-level ``configs`` directory for the detectors.

    Raises:
        Exception: If the ``configs`` directory does not exist.
    """
    try:
        # Three levels up from this file when run from a source checkout.
        repo_dpath = dirname(dirname(dirname(__file__)))
    except NameError:
        # Interactive sessions (IPython) have no ``__file__``; use the
        # installed mmdet3d package as the anchor instead.
        import mmdet3d
        repo_dpath = dirname(dirname(mmdet3d.__file__))
    config_dpath = join(repo_dpath, 'configs')
    if not exists(config_dpath):
        raise Exception('Cannot find config path')
    return config_dpath
def _get_config_module(fname):
    """Parse config file ``fname`` (relative to ``configs``) with mmcv."""
    from mmcv import Config
    return Config.fromfile(join(_get_config_directory(), fname))
def _get_model_cfg(fname):
    """Return a deep copy of the ``model`` section of config ``fname``.

    Deep copying lets each test mutate parameters without influencing
    other tests sharing the same config.
    """
    cfg = _get_config_module(fname)
    return copy.deepcopy(cfg.model)
def _get_detector_cfg(fname):
    """Return a detector's model config with train/test cfg attached.

    Everything is deep copied so tests can safely modify parameters.
    """
    import mmcv
    cfg = _get_config_module(fname)
    detector_cfg = copy.deepcopy(cfg.model)
    detector_cfg.update(
        train_cfg=mmcv.Config(copy.deepcopy(cfg.model.train_cfg)),
        test_cfg=mmcv.Config(copy.deepcopy(cfg.model.test_cfg)))
    return detector_cfg
def _create_detector_inputs(seed=0,
                            num_gt_instance=20,
                            points_feat_dim=4,
                            num_classes=3,
                            num_points=10):
    """Create a random detector input dict for tests.

    Args:
        seed (int): RNG seed applied before generating data. Defaults to 0.
        num_gt_instance (int): Number of ground-truth boxes. Defaults to 20.
        points_feat_dim (int): Feature dimension per point. Defaults to 4.
        num_classes (int): Number of label classes. Defaults to 3.
        num_points (int): Number of points in the cloud. Defaults to 10
            (previously hard-coded; kept as the default for compatibility).

    Returns:
        dict: ``inputs`` (with random ``points``) and a ``data_sample``
        carrying random LiDAR ground-truth instances.
    """
    _setup_seed(seed)
    inputs_dict = dict(points=torch.rand([num_points, points_feat_dim]))
    gt_instance_3d = InstanceData()
    gt_instance_3d.bboxes_3d = LiDARInstance3DBoxes(
        torch.rand([num_gt_instance, 7]))
    gt_instance_3d.labels_3d = torch.randint(0, num_classes, [num_gt_instance])
    data_sample = Det3DDataSample(
        metainfo=dict(box_type_3d=LiDARInstance3DBoxes))
    data_sample.gt_instances_3d = gt_instance_3d
    data_sample.seg_data = dict()
    return dict(inputs=inputs_dict, data_sample=data_sample)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment