Commit 2dedcc20 authored by liyinhao's avatar liyinhao Committed by zhangwenwei
Browse files

Add pipeline unittest

parent 32f3955c
......@@ -6,9 +6,9 @@ from .lyft_dataset import LyftDataset
from .nuscenes_dataset import NuScenesDataset
from .pipelines import (GlobalRotScaleTrans, IndoorPointSample,
LoadAnnotations3D, LoadPointsFromFile,
NormalizePointsColor, ObjectNoise, ObjectRangeFilter,
ObjectSample, PointShuffle, PointsRangeFilter,
RandomFlip3D)
LoadPointsFromMultiSweeps, NormalizePointsColor,
ObjectNoise, ObjectRangeFilter, ObjectSample,
PointShuffle, PointsRangeFilter, RandomFlip3D)
from .scannet_dataset import ScanNetDataset
from .sunrgbd_dataset import SUNRGBDDataset
......@@ -19,5 +19,6 @@ __all__ = [
'ObjectSample', 'RandomFlip3D', 'ObjectNoise', 'GlobalRotScaleTrans',
'PointShuffle', 'ObjectRangeFilter', 'PointsRangeFilter', 'Collect3D',
'LoadPointsFromFile', 'NormalizePointsColor', 'IndoorPointSample',
'LoadAnnotations3D', 'SUNRGBDDataset', 'ScanNetDataset', 'Custom3DDataset'
'LoadAnnotations3D', 'SUNRGBDDataset', 'ScanNetDataset', 'Custom3DDataset',
'LoadPointsFromMultiSweeps'
]
......@@ -2,8 +2,8 @@ from mmdet.datasets.pipelines import Compose
from .dbsampler import DataBaseSampler
from .formating import Collect3D, DefaultFormatBundle, DefaultFormatBundle3D
from .loading import (LoadAnnotations3D, LoadMultiViewImageFromFiles,
LoadPointsFromFile, NormalizePointsColor,
PointSegClassMapping)
LoadPointsFromFile, LoadPointsFromMultiSweeps,
NormalizePointsColor, PointSegClassMapping)
from .test_time_aug import MultiScaleFlipAug3D
from .transforms_3d import (GlobalRotScaleTrans, IndoorPointSample,
ObjectNoise, ObjectRangeFilter, ObjectSample,
......@@ -15,5 +15,5 @@ __all__ = [
'Compose', 'LoadMultiViewImageFromFiles', 'LoadPointsFromFile',
'DefaultFormatBundle', 'DefaultFormatBundle3D', 'DataBaseSampler',
'NormalizePointsColor', 'LoadAnnotations3D', 'IndoorPointSample',
'PointSegClassMapping', 'MultiScaleFlipAug3D'
'PointSegClassMapping', 'MultiScaleFlipAug3D', 'LoadPointsFromMultiSweeps'
]
......@@ -230,11 +230,11 @@ class LoadPointsFromFile(object):
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += '(shift_height={})'.format(self.shift_height)
repr_str += '(mean_color={})'.format(self.color_mean)
repr_str += '(load_dim={})'.format(self.load_dim)
repr_str += '(use_dim={})'.format(self.use_dim)
repr_str = self.__class__.__name__ + '('
repr_str += 'shift_height={}, '.format(self.shift_height)
repr_str += 'file_client_args={}), '.format(self.file_client_args)
repr_str += 'load_dim={}, '.format(self.load_dim)
repr_str += 'use_dim={})'.format(self.use_dim)
return repr_str
......@@ -354,13 +354,13 @@ class LoadAnnotations3D(LoadAnnotations):
def __repr__(self):
indent_str = ' '
repr_str = self.__class__.__name__ + '(\n'
repr_str += f'{indent_str}with_bbox_3d={self.with_bbox_3d},\n'
repr_str += f'{indent_str}with_label_3d={self.with_label_3d},\n'
repr_str += f'{indent_str}with_mask_3d={self.with_mask_3d},\n'
repr_str += f'{indent_str}with_seg_3d={self.with_seg_3d},\n'
repr_str += f'{indent_str}with_bbox={self.with_bbox},\n'
repr_str += f'{indent_str}with_label={self.with_label},\n'
repr_str += f'{indent_str}with_mask={self.with_mask},\n'
repr_str += f'{indent_str}with_seg={self.with_seg},\n'
repr_str += f'{indent_str}with_bbox_3d={self.with_bbox_3d}, '
repr_str += f'{indent_str}with_label_3d={self.with_label_3d}, '
repr_str += f'{indent_str}with_mask_3d={self.with_mask_3d}, '
repr_str += f'{indent_str}with_seg_3d={self.with_seg_3d}, '
repr_str += f'{indent_str}with_bbox={self.with_bbox}, '
repr_str += f'{indent_str}with_label={self.with_label}, '
repr_str += f'{indent_str}with_mask={self.with_mask}, '
repr_str += f'{indent_str}with_seg={self.with_seg}, '
repr_str += f'{indent_str}poly2mask={self.poly2mask})'
return repr_str
import mmcv
import numpy as np
from mmdet3d.datasets.pipelines.data_augment_utils import (
noise_per_object_v3_, points_transform_)
def test_noise_per_object_v3_():
    """Smoke-test the in-place per-object noise augmentation on one KITTI
    sample, pinning the noised box against a precomputed expected value."""
    # Fixed seed: noise_per_object_v3_ draws random noise, so the expected
    # values below are only reproducible with this exact seed.
    np.random.seed(0)
    points = np.fromfile(
        './tests/data/kitti/training/velodyne_reduced/000000.bin',
        np.float32).reshape(-1, 4)
    annos = mmcv.load('./tests/data/kitti/kitti_infos_train.pkl')
    info = annos[0]
    annos = info['annos']
    loc = annos['location']
    dims = annos['dimensions']
    rots = annos['rotation_y']
    # Assemble (N, 7) boxes from location, dimensions and yaw
    # (camera-frame KITTI annotations — layout assumed, TODO confirm).
    gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                  axis=1).astype(np.float32)
    # Mutates both gt_bboxes_3d and points in place.
    noise_per_object_v3_(gt_boxes=gt_bboxes_3d, points=points)
    expected_gt_bboxes_3d = np.array(
        [[3.3430212, 2.1475432, 9.388738, 1.2, 1.89, 0.48, 0.05056486]])
    assert points.shape == (800, 4)
    assert np.allclose(gt_bboxes_3d, expected_gt_bboxes_3d)
def test_points_transform():
    """Run points_transform_ in place on a tiny fixture and verify the
    array shapes survive the call unchanged."""
    cloud = np.array([[46.5090, 6.1140, -0.7790, 0.0000],
                      [42.9490, 6.4050, -0.7050, 0.0000],
                      [42.9010, 6.5360, -0.7050, 0.0000],
                      [46.1960, 6.0960, -1.0100, 0.0000],
                      [43.3080, 6.2680, -0.9360, 0.0000]])
    boxes = np.array([
        [1.5340e+01, 8.4691e+00, -1.6855e+00, 1.6400e+00, 3.7000e+00,
         1.4900e+00, 3.1300e+00],
        [1.7999e+01, 8.2386e+00, -1.5802e+00, 1.5500e+00, 4.0200e+00,
         1.5200e+00, 3.1300e+00],
        [2.9620e+01, 8.2617e+00, -1.6185e+00, 1.7800e+00, 4.2500e+00,
         1.9000e+00, -3.1200e+00],
        [4.8218e+01, 7.8035e+00, -1.3790e+00, 1.6400e+00, 3.7000e+00,
         1.5200e+00, -1.0000e-02],
        [3.3079e+01, -8.4817e+00, -1.3092e+00, 4.3000e-01, 1.7000e+00,
         1.6200e+00, -1.5700e+00],
    ])
    # No point is marked as belonging to any box, so the per-box noise
    # should leave the cloud untouched.
    masks = np.zeros((5, 5), dtype=bool)
    loc_noise = np.array([[-1.8635, -0.2774, -0.1774],
                          [-1.0297, -1.0302, -0.3062],
                          [1.6680, 0.2597, 0.0551],
                          [0.2230, 0.7257, -0.0097],
                          [-0.1403, 0.8300, 0.3431]])
    rot_noise = np.array([0.6888, -0.3858, 0.1910, -0.0044, -0.0036])
    valid = np.ones(5, dtype=bool)
    # Operates in place on cloud; only box centers (first 3 cols) are used.
    points_transform_(cloud, boxes[:, :3], masks, loc_noise, rot_noise,
                      valid)
    assert cloud.shape == (5, 4)
    assert boxes.shape == (5, 7)
......@@ -52,5 +52,8 @@ def test_indoor_sample():
sunrgbd_results = sunrgbd_sample_points(sunrgbd_results)
sunrgbd_choices = np.array([2, 8, 4, 9, 1])
sunrgbd_points_result = sunrgbd_results['points']
repr_str = repr(sunrgbd_sample_points)
expected_repr_str = 'IndoorPointSample(num_points=5)'
assert repr_str == expected_repr_str
assert np.allclose(sunrgbd_point_cloud[sunrgbd_choices],
sunrgbd_points_result)
......@@ -4,7 +4,8 @@ import pytest
from os import path as osp
from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet3d.datasets.pipelines import LoadAnnotations3D, LoadPointsFromFile
from mmdet3d.datasets.pipelines import (LoadAnnotations3D, LoadPointsFromFile,
LoadPointsFromMultiSweeps)
def test_load_points_from_indoor_file():
......@@ -29,6 +30,11 @@ def test_load_points_from_indoor_file():
scannet_info['pts_path'])
scannet_results = scannet_load_data(scannet_results)
scannet_point_cloud = scannet_results['points']
repr_str = repr(scannet_load_data)
expected_repr_str = 'LoadPointsFromFile(shift_height=True, ' \
'file_client_args={\'backend\': \'disk\'}), ' \
'load_dim=6, use_dim=[0, 1, 2])'
assert repr_str == expected_repr_str
assert scannet_point_cloud.shape == (100, 4)
......@@ -93,7 +99,39 @@ def test_load_annotations3D():
scannet_pts_instance_mask = scannet_results['pts_instance_mask']
scannet_pts_semantic_mask = scannet_results['pts_semantic_mask']
repr_str = repr(scannet_load_annotations3D)
expected_repr_str = 'LoadAnnotations3D(\n with_bbox_3d=True, ' \
'with_label_3d=True, with_mask_3d=True, ' \
'with_seg_3d=True, with_bbox=False, ' \
'with_label=False, with_mask=False, ' \
'with_seg=False, poly2mask=True)'
assert repr_str == expected_repr_str
assert scannet_gt_boxes.tensor.shape == (27, 7)
assert scannet_gt_lbaels.shape == (27, )
assert scannet_pts_instance_mask.shape == (100, )
assert scannet_pts_semantic_mask.shape == (100, )
def test_load_points_from_multi_sweeps():
    """Merge one nuScenes sweep with the current-frame points and check
    both the resulting point count and the transform's repr."""
    load_points_from_multi_sweeps = LoadPointsFromMultiSweeps()
    # Metadata for a single recorded sweep; the rotation/translation map
    # sensor coordinates into the reference lidar frame.
    sweep = dict(
        data_path='./tests/data/nuscenes/sweeps/LIDAR_TOP/'
        'n008-2018-09-18-12-07-26-0400__LIDAR_TOP__1537287083900561.pcd.bin',
        timestamp=1537290014899034,
        sensor2lidar_translation=[-0.02344713, -3.88266051, -0.17151584],
        sensor2lidar_rotation=np.array(
            [[9.99979347e-01, 3.99870769e-04, 6.41441690e-03],
             [-4.42034222e-04, 9.99978299e-01, 6.57316197e-03],
             [-6.41164929e-03, -6.57586161e-03, 9.99957824e-01]]))
    # Three dummy current-frame points with 5 columns each.
    results = dict(
        points=np.array([[1., 2., 3., 4., 5.], [1., 2., 3., 4., 5.],
                         [1., 2., 3., 4., 5.]]),
        timestamp=1537290014899034,
        sweeps=[sweep])
    results = load_points_from_multi_sweeps(results)
    points = results['points']
    repr_str = repr(load_points_from_multi_sweeps)
    expected_repr_str = 'LoadPointsFromMultiSweeps(sweeps_num=10)'
    assert repr_str == expected_repr_str
    # 3 current points + 400 sweep points = 403 rows; the transform
    # presumably keeps only 4 columns — TODO confirm against the loader.
    assert points.shape == (403, 4)
import mmcv
import numpy as np
import torch
from mmdet3d.core import Box3DMode, CameraInstance3DBoxes
from mmdet3d.datasets import ObjectNoise, ObjectSample
def test_remove_points_in_boxes():
    """remove_points_in_boxes keeps all points when none lie inside the
    given boxes — the far-away cloud here must pass through unchanged."""
    cloud = np.array([[68.1370, 3.3580, 2.5160, 0.0000],
                      [67.6970, 3.5500, 2.5010, 0.0000],
                      [67.6490, 3.7600, 2.5000, 0.0000],
                      [66.4140, 3.9010, 2.4590, 0.0000],
                      [66.0120, 4.0850, 2.4460, 0.0000],
                      [65.8340, 4.1780, 2.4400, 0.0000],
                      [65.8410, 4.3860, 2.4400, 0.0000],
                      [65.7450, 4.5870, 2.4380, 0.0000],
                      [65.5510, 4.7800, 2.4320, 0.0000],
                      [65.4860, 4.9820, 2.4300, 0.0000]])
    bboxes = np.array([
        [30.0285, 10.5110, -1.5304, 0.5100, 0.8700, 1.6000, 1.6400],
        [7.8369, 1.6053, -1.5605, 0.5800, 1.2300, 1.8200, -3.1000],
        [10.8740, -1.0827, -1.3310, 0.6000, 0.5200, 1.7100, 1.3500],
        [14.9783, 2.2466, -1.4950, 0.6100, 0.7300, 1.5300, -1.9200],
        [11.0656, 0.6195, -1.5202, 0.6600, 1.0100, 1.7600, -1.4600],
        [10.5994, -7.9049, -1.4980, 0.5300, 1.9600, 1.6800, 1.5600],
        [28.7068, -8.8244, -1.1485, 0.6500, 1.7900, 1.7500, 3.1200],
        [20.2630, 5.1947, -1.4799, 0.7300, 1.7600, 1.7300, 1.5100],
        [18.2496, 3.1887, -1.6109, 0.5600, 1.6800, 1.7100, 1.5600],
        [7.7396, -4.3245, -1.5801, 0.5600, 1.7900, 1.8000, -0.8300],
    ])
    cloud = ObjectSample.remove_points_in_boxes(cloud, bboxes)
    # Nothing was removed: all 10 points remain.
    assert cloud.shape == (10, 4)
def test_object_sample():
    """Paste database-sampled pedestrians into a KITTI frame and check the
    augmented boxes, labels, point count and the transform's repr."""
    # Ground-truth database sampler config: sample up to 6 pedestrians,
    # keeping only DB entries with >= 10 points and difficulty != -1.
    db_sampler = mmcv.ConfigDict({
        'data_root': './tests/data/kitti/',
        'info_path': './tests/data/kitti/kitti_dbinfos_train.pkl',
        'rate': 1.0,
        'prepare': {
            'filter_by_difficulty': [-1],
            'filter_by_min_points': {
                'Pedestrian': 10
            }
        },
        'classes': ['Pedestrian', 'Cyclist', 'Car'],
        'sample_groups': {
            'Pedestrian': 6
        }
    })
    object_sample = ObjectSample(db_sampler)
    points = np.fromfile(
        './tests/data/kitti/training/velodyne_reduced/000000.bin',
        np.float32).reshape(-1, 4)
    annos = mmcv.load('./tests/data/kitti/kitti_infos_train.pkl')
    info = annos[0]
    # Calibration matrices used to move boxes from camera to lidar frame.
    rect = info['calib']['R0_rect'].astype(np.float32)
    Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)
    annos = info['annos']
    loc = annos['location']
    dims = annos['dimensions']
    rots = annos['rotation_y']
    gt_names = annos['name']
    gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                  axis=1).astype(np.float32)
    gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d).convert_to(
        Box3DMode.LIDAR, np.linalg.inv(rect @ Trv2c))
    # Map annotation names to label indices; unknown categories become -1.
    # NOTE: CLASSES is lowercase while KITTI names are capitalized, so the
    # original annotation maps to -1 here.
    CLASSES = ('car', 'pedestrian', 'cyclist')
    gt_labels = []
    for cat in gt_names:
        if cat in CLASSES:
            gt_labels.append(CLASSES.index(cat))
        else:
            gt_labels.append(-1)
    input_dict = dict(
        points=points, gt_bboxes_3d=gt_bboxes_3d, gt_labels_3d=gt_labels)
    input_dict = object_sample(input_dict)
    points = input_dict['points']
    gt_bboxes_3d = input_dict['gt_bboxes_3d']
    gt_labels_3d = input_dict['gt_labels_3d']
    # One sampled pedestrian (label 0) appended after the original box.
    expected_gt_bboxes_3d = torch.tensor(
        [[8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100],
         [8.7314, -1.8559, -1.5997, 0.4800, 1.2000, 1.8900, 0.0100]])
    expected_gt_labels_3d = np.array([-1, 0])
    repr_str = repr(object_sample)
    expected_repr_str = 'ObjectSample'
    assert repr_str == expected_repr_str
    assert points.shape == (1177, 4)
    assert torch.allclose(gt_bboxes_3d.tensor, expected_gt_bboxes_3d)
    assert np.all(gt_labels_3d == expected_gt_labels_3d)
def test_object_noise():
    """Apply per-object noise to a KITTI frame and pin the noised box,
    point count and the transform's repr."""
    # Fixed seed: ObjectNoise is stochastic, so the expected box below is
    # only valid for this seed.
    np.random.seed(0)
    object_noise = ObjectNoise()
    points = np.fromfile(
        './tests/data/kitti/training/velodyne_reduced/000000.bin',
        np.float32).reshape(-1, 4)
    annos = mmcv.load('./tests/data/kitti/kitti_infos_train.pkl')
    info = annos[0]
    # Calibration matrices used to move boxes from camera to lidar frame.
    rect = info['calib']['R0_rect'].astype(np.float32)
    Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)
    annos = info['annos']
    loc = annos['location']
    dims = annos['dimensions']
    rots = annos['rotation_y']
    gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                  axis=1).astype(np.float32)
    gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d).convert_to(
        Box3DMode.LIDAR, np.linalg.inv(rect @ Trv2c))
    input_dict = dict(points=points, gt_bboxes_3d=gt_bboxes_3d)
    input_dict = object_noise(input_dict)
    points = input_dict['points']
    gt_bboxes_3d = input_dict['gt_bboxes_3d'].tensor
    expected_gt_bboxes_3d = torch.tensor(
        [[9.1724, -1.7559, -1.3550, 0.4800, 1.2000, 1.8900, 0.0505]])
    repr_str = repr(object_noise)
    expected_repr_str = 'ObjectNoise(num_try=100, ' \
                        'translation_std=[0.25, 0.25, 0.25], ' \
                        'global_rot_range=[0.0, 0.0], ' \
                        'rot_range=[-0.15707963267, 0.15707963267])'
    assert repr_str == expected_repr_str
    assert points.shape == (800, 4)
    assert torch.allclose(gt_bboxes_3d, expected_gt_bboxes_3d, 1e-3)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment