import mmcv
import numpy as np
import torch
from os import path as osp

from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet3d.datasets.pipelines import Compose


def test_scannet_pipeline():
    """Run the full ScanNet indoor training pipeline on fixture data.

    Loads a point cloud and its 3D annotations from ``tests/data/scannet``,
    pushes them through loading, point sampling, random flip, global
    rotate/scale/translate, formatting and collection with a fixed random
    seed, then compares the transformed outputs against precomputed values.
    """
    class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
                   'window', 'bookshelf', 'picture', 'counter', 'desk',
                   'curtain', 'refrigerator', 'showercurtrain', 'toilet',
                   'sink', 'bathtub', 'garbagebin')

    # Fix the seed so RandomFlip3D / GlobalRotScaleTrans / IndoorPointSample
    # are deterministic and the expected tensors below stay valid.
    np.random.seed(0)
    pipelines = [
        dict(
            type='LoadPointsFromFile',
            shift_height=True,
            load_dim=6,
            use_dim=[0, 1, 2]),
        dict(
            type='LoadAnnotations3D',
            with_bbox_3d=True,
            with_label_3d=True,
            with_mask_3d=True,
            with_seg_3d=True),
        dict(type='IndoorPointSample', num_points=5),
        dict(
            type='RandomFlip3D',
            sync_2d=False,
            flip_ratio_bev_horizontal=1.0,
            flip_ratio_bev_vertical=1.0),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.087266, 0.087266],
            scale_ratio_range=[1.0, 1.0],
            shift_height=True),
        dict(type='DefaultFormatBundle3D', class_names=class_names),
        dict(
            type='Collect3D',
            keys=[
                'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask',
                'pts_instance_mask'
            ]),
    ]
    pipeline = Compose(pipelines)
    info = mmcv.load('./tests/data/scannet/scannet_infos.pkl')[0]
    results = dict()
    data_path = './tests/data/scannet'
    results['pts_filename'] = osp.join(data_path, info['pts_path'])
    if info['annos']['gt_num'] != 0:
        scannet_gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
            np.float32)
        # np.long was deprecated in NumPy 1.20 and removed in 1.24;
        # np.int64 is the dtype it aliased.
        scannet_gt_labels_3d = info['annos']['class'].astype(np.int64)
    else:
        scannet_gt_bboxes_3d = np.zeros((1, 6), dtype=np.float32)
        scannet_gt_labels_3d = np.zeros((1, ), dtype=np.int64)
    results['ann_info'] = dict()
    results['ann_info']['pts_instance_mask_path'] = osp.join(
        data_path, info['pts_instance_mask_path'])
    results['ann_info']['pts_semantic_mask_path'] = osp.join(
        data_path, info['pts_semantic_mask_path'])
    results['ann_info']['gt_bboxes_3d'] = DepthInstance3DBoxes(
        scannet_gt_bboxes_3d, box_dim=6, with_yaw=False)
    results['ann_info']['gt_labels_3d'] = scannet_gt_labels_3d

    # Field registries the transforms append to / read from.
    results['img_fields'] = []
    results['bbox3d_fields'] = []
    results['pts_mask_fields'] = []
    results['pts_seg_fields'] = []

    results = pipeline(results)

    # Outputs are wrapped in DataContainer; unwrap via ._data.
    points = results['points']._data
    gt_bboxes_3d = results['gt_bboxes_3d']._data
    gt_labels_3d = results['gt_labels_3d']._data
    pts_semantic_mask = results['pts_semantic_mask']._data
    pts_instance_mask = results['pts_instance_mask']._data
    expected_points = torch.tensor([[-2.7231, -2.2068, 2.3543, 2.3895],
                                    [-0.4065, -3.4857, 2.1330, 2.1682],
                                    [-1.4578, 1.3510, -0.0441, -0.0089],
                                    [2.2428, -1.1323, -0.0288, 0.0064],
                                    [0.7052, -2.9752, 1.5560, 1.5912]])
    expected_gt_bboxes_3d = torch.tensor(
        [[-1.1835, -3.6317, 1.8565, 1.7577, 0.3761, 0.5724, 0.0000],
         [-3.1832, 3.2269, 1.5268, 0.6727, 0.2251, 0.6715, 0.0000],
         [-0.9598, -2.2864, 0.6165, 0.7506, 2.5709, 1.2145, 0.0000],
         [-2.6988, -2.7354, 0.9722, 0.7680, 1.8877, 0.2870, 0.0000],
         [3.2989, 0.2885, 1.0712, 0.7600, 3.8814, 2.1603, 0.0000]])
    expected_gt_labels_3d = np.array([
        6, 6, 4, 9, 11, 11, 10, 0, 15, 17, 17, 17, 3, 12, 4, 4, 14, 1, 0, 0, 0,
        0, 0, 0, 5, 5, 5
    ])
    expected_pts_semantic_mask = np.array([3, 1, 2, 2, 15])
    expected_pts_instance_mask = np.array([44, 22, 10, 10, 57])
    # Positional third argument of torch.allclose is rtol.
    assert torch.allclose(points, expected_points, 1e-2)
    assert torch.allclose(gt_bboxes_3d.tensor[:5, :], expected_gt_bboxes_3d,
                          1e-2)
    assert np.all(gt_labels_3d.numpy() == expected_gt_labels_3d)
    assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask)
    assert np.all(pts_instance_mask.numpy() == expected_pts_instance_mask)


def test_sunrgbd_pipeline():
    """Run the full SUN RGB-D indoor training pipeline on fixture data.

    Loads a point cloud and its 3D annotations from ``tests/data/sunrgbd``,
    pushes them through loading, random flip, global rotate/scale/translate,
    point sampling, formatting and collection with a fixed random seed, then
    compares the transformed outputs against precomputed values.
    """
    class_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
                   'dresser', 'night_stand', 'bookshelf', 'bathtub')

    # Fix the seed so RandomFlip3D / GlobalRotScaleTrans / IndoorPointSample
    # are deterministic and the expected tensors below stay valid.
    np.random.seed(0)
    pipelines = [
        dict(
            type='LoadPointsFromFile',
            shift_height=True,
            load_dim=6,
            use_dim=[0, 1, 2]),
        dict(type='LoadAnnotations3D'),
        dict(
            type='RandomFlip3D',
            sync_2d=False,
            flip_ratio_bev_horizontal=1.0,
        ),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.523599, 0.523599],
            scale_ratio_range=[0.85, 1.15],
            shift_height=True),
        dict(type='IndoorPointSample', num_points=5),
        dict(type='DefaultFormatBundle3D', class_names=class_names),
        dict(
            type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']),
    ]
    pipeline = Compose(pipelines)
    results = dict()
    info = mmcv.load('./tests/data/sunrgbd/sunrgbd_infos.pkl')[0]
    data_path = './tests/data/sunrgbd'
    results['pts_filename'] = osp.join(data_path, info['pts_path'])

    if info['annos']['gt_num'] != 0:
        gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
            np.float32)
        # np.long was deprecated in NumPy 1.20 and removed in 1.24;
        # np.int64 is the dtype it aliased.
        gt_labels_3d = info['annos']['class'].astype(np.int64)
    else:
        gt_bboxes_3d = np.zeros((1, 7), dtype=np.float32)
        gt_labels_3d = np.zeros((1, ), dtype=np.int64)

    # prepare input of pipeline
    results['ann_info'] = dict()
    results['ann_info']['gt_bboxes_3d'] = DepthInstance3DBoxes(gt_bboxes_3d)
    results['ann_info']['gt_labels_3d'] = gt_labels_3d
    # Field registries the transforms append to / read from.
    results['img_fields'] = []
    results['bbox3d_fields'] = []
    results['pts_mask_fields'] = []
    results['pts_seg_fields'] = []

    results = pipeline(results)
    # Outputs are wrapped in DataContainer; unwrap via ._data.
    points = results['points']._data
    gt_bboxes_3d = results['gt_bboxes_3d']._data
    gt_labels_3d = results['gt_labels_3d']._data
    expected_points = torch.tensor([[0.8678, 1.3470, 0.1105, 0.0905],
                                    [0.8707, 1.3635, 0.0437, 0.0238],
                                    [0.8636, 1.3511, 0.0504, 0.0304],
                                    [0.8690, 1.3461, 0.1265, 0.1065],
                                    [0.8668, 1.3434, 0.1216, 0.1017]])
    expected_gt_bboxes_3d = torch.tensor(
        [[-1.2136, 4.0206, -0.2412, 2.2493, 1.8444, 1.9245, 1.3989],
         [-2.7420, 4.5777, -0.7686, 0.5718, 0.8629, 0.9510, 1.4446],
         [0.9729, 1.9087, -0.1443, 0.6965, 1.5273, 2.0563, 2.9924]])
    expected_gt_labels_3d = np.array([0, 7, 6])
    # Positional third argument of torch.allclose is rtol.
    assert torch.allclose(gt_bboxes_3d.tensor, expected_gt_bboxes_3d, 1e-3)
    assert np.allclose(gt_labels_3d.flatten(), expected_gt_labels_3d)
    assert torch.allclose(points, expected_points, 1e-2)