import os.path as osp

import mmcv
import numpy as np
import torch

from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet3d.datasets.pipelines import Compose


def test_scannet_pipeline():
    """Test the indoor data pipeline end-to-end on the ScanNet toy sample.

    Builds the full training pipeline (point loading, annotation loading,
    sampling, flip/rot/scale augmentation, formatting, collection), feeds it
    the first entry of the bundled ScanNet info file, and checks the augmented
    points, boxes, labels and masks against precomputed reference values.
    The random seed is fixed so the augmentations are deterministic.
    """
    class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
                   'window', 'bookshelf', 'picture', 'counter', 'desk',
                   'curtain', 'refrigerator', 'showercurtrain', 'toilet',
                   'sink', 'bathtub', 'garbagebin')

    # Fix the seed: RandomFlip3D / GlobalRotScaleTrans draw random numbers,
    # and the expected tensors below were generated with seed 0.
    np.random.seed(0)
    pipelines = [
        dict(
            type='LoadPointsFromFile',
            shift_height=True,
            load_dim=6,
            use_dim=[0, 1, 2]),
        dict(
            type='LoadAnnotations3D',
            with_bbox_3d=True,
            with_label_3d=True,
            with_mask_3d=True,
            with_seg_3d=True),
        # Sample only 5 points so the expected tensors stay tiny.
        dict(type='IndoorPointSample', num_points=5),
        dict(
            type='RandomFlip3D',
            sync_2d=False,
            # flip_ratio=1.0 makes both flips deterministic (always applied).
            flip_ratio_bev_horizontal=1.0,
            flip_ratio_bev_vertical=1.0),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.087266, 0.087266],
            scale_ratio_range=[1.0, 1.0],
            shift_height=True),
        dict(type='DefaultFormatBundle3D', class_names=class_names),
        dict(
            type='Collect3D',
            keys=[
                'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask',
                'pts_instance_mask'
            ]),
    ]
    pipeline = Compose(pipelines)
    info = mmcv.load('./tests/data/scannet/scannet_infos.pkl')[0]
    results = dict()
    data_path = './tests/data/scannet'
    results['pts_filename'] = osp.join(data_path, info['pts_path'])
    if info['annos']['gt_num'] != 0:
        scannet_gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
            np.float32)
        # np.int64 instead of the removed alias np.long (NumPy >= 1.24).
        scannet_gt_labels_3d = info['annos']['class'].astype(np.int64)
    else:
        # No annotations in the sample: fall back to a single dummy box/label.
        scannet_gt_bboxes_3d = np.zeros((1, 6), dtype=np.float32)
        scannet_gt_labels_3d = np.zeros((1, ), dtype=np.int64)
    results['ann_info'] = dict()
    results['ann_info']['pts_instance_mask_path'] = osp.join(
        data_path, info['pts_instance_mask_path'])
    results['ann_info']['pts_semantic_mask_path'] = osp.join(
        data_path, info['pts_semantic_mask_path'])
    # ScanNet boxes are axis-aligned (no yaw), hence box_dim=6, with_yaw=False.
    results['ann_info']['gt_bboxes_3d'] = DepthInstance3DBoxes(
        scannet_gt_bboxes_3d, box_dim=6, with_yaw=False)
    results['ann_info']['gt_labels_3d'] = scannet_gt_labels_3d

    # Field registries consumed by the augmentation transforms.
    results['img_fields'] = []
    results['bbox3d_fields'] = []
    results['pts_mask_fields'] = []
    results['pts_seg_fields'] = []

    results = pipeline(results)

    # DefaultFormatBundle3D wraps outputs in DataContainer; unwrap via _data.
    points = results['points']._data
    gt_bboxes_3d = results['gt_bboxes_3d']._data
    gt_labels_3d = results['gt_labels_3d']._data
    pts_semantic_mask = results['pts_semantic_mask']._data
    pts_instance_mask = results['pts_instance_mask']._data
    expected_points = torch.tensor([[-2.7231, -2.2068, 2.3543, 2.3895],
                                    [-0.4065, -3.4857, 2.1330, 2.1682],
                                    [-1.4578, 1.3510, -0.0441, -0.0089],
                                    [2.2428, -1.1323, -0.0288, 0.0064],
                                    [0.7052, -2.9752, 1.5560, 1.5912]])
    expected_gt_bboxes_3d = torch.tensor(
        [[-1.1835, -3.6317, 1.8565, 1.7577, 0.3761, 0.5724, 0.0000],
         [-3.1832, 3.2269, 1.5268, 0.6727, 0.2251, 0.6715, 0.0000],
         [-0.9598, -2.2864, 0.6165, 0.7506, 2.5709, 1.2145, 0.0000],
         [-2.6988, -2.7354, 0.9722, 0.7680, 1.8877, 0.2870, 0.0000],
         [3.2989, 0.2885, 1.0712, 0.7600, 3.8814, 2.1603, 0.0000]])
    expected_gt_labels_3d = np.array([
        6, 6, 4, 9, 11, 11, 10, 0, 15, 17, 17, 17, 3, 12, 4, 4, 14, 1, 0, 0, 0,
        0, 0, 0, 5, 5, 5
    ])
    expected_pts_semantic_mask = np.array([3, 1, 2, 2, 15])
    expected_pts_instance_mask = np.array([44, 22, 10, 10, 57])
    # Positional third argument of torch.allclose is rtol.
    assert torch.allclose(points, expected_points, 1e-2)
    assert torch.allclose(gt_bboxes_3d.tensor[:5, :], expected_gt_bboxes_3d,
                          1e-2)
    assert np.all(gt_labels_3d.numpy() == expected_gt_labels_3d)
    assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask)
    assert np.all(pts_instance_mask.numpy() == expected_pts_instance_mask)


def test_sunrgbd_pipeline():
    """Test the indoor data pipeline end-to-end on the SUN RGB-D toy sample.

    Builds the SUN RGB-D training pipeline (point loading, annotation loading,
    horizontal flip, rot/scale augmentation, sampling, formatting, collection),
    runs it on the first entry of the bundled info file, and checks the
    resulting points, boxes and labels against precomputed reference values.
    The random seed is fixed so the augmentations are deterministic.
    """
    class_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
                   'dresser', 'night_stand', 'bookshelf', 'bathtub')
    # Fix the seed: the expected tensors below were generated with seed 0.
    np.random.seed(0)
    pipelines = [
        dict(
            type='LoadPointsFromFile',
            shift_height=True,
            load_dim=6,
            use_dim=[0, 1, 2]),
        dict(type='LoadAnnotations3D'),
        dict(
            type='RandomFlip3D',
            sync_2d=False,
            # flip_ratio=1.0 makes the horizontal flip deterministic.
            flip_ratio_bev_horizontal=1.0,
        ),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.523599, 0.523599],
            scale_ratio_range=[0.85, 1.15],
            shift_height=True),
        # Sample only 5 points so the expected tensors stay tiny.
        dict(type='IndoorPointSample', num_points=5),
        dict(type='DefaultFormatBundle3D', class_names=class_names),
        dict(
            type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']),
    ]
    pipeline = Compose(pipelines)
    results = dict()
    info = mmcv.load('./tests/data/sunrgbd/sunrgbd_infos.pkl')[0]
    data_path = './tests/data/sunrgbd'
    results['pts_filename'] = osp.join(data_path, info['pts_path'])

    if info['annos']['gt_num'] != 0:
        gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
            np.float32)
        # np.int64 instead of the removed alias np.long (NumPy >= 1.24).
        gt_labels_3d = info['annos']['class'].astype(np.int64)
    else:
        # No annotations in the sample: fall back to a single dummy box/label.
        gt_bboxes_3d = np.zeros((1, 7), dtype=np.float32)
        gt_labels_3d = np.zeros((1, ), dtype=np.int64)

    # prepare input of pipeline
    results['ann_info'] = dict()
    # SUN RGB-D boxes carry a yaw angle, hence the default 7-dim box.
    results['ann_info']['gt_bboxes_3d'] = DepthInstance3DBoxes(gt_bboxes_3d)
    results['ann_info']['gt_labels_3d'] = gt_labels_3d
    # Field registries consumed by the augmentation transforms.
    results['img_fields'] = []
    results['bbox3d_fields'] = []
    results['pts_mask_fields'] = []
    results['pts_seg_fields'] = []

    results = pipeline(results)
    # DefaultFormatBundle3D wraps outputs in DataContainer; unwrap via _data.
    points = results['points']._data
    gt_bboxes_3d = results['gt_bboxes_3d']._data
    gt_labels_3d = results['gt_labels_3d']._data
    expected_points = torch.tensor([[0.8678, 1.3470, 0.1105, 0.0905],
                                    [0.8707, 1.3635, 0.0437, 0.0238],
                                    [0.8636, 1.3511, 0.0504, 0.0304],
                                    [0.8690, 1.3461, 0.1265, 0.1065],
                                    [0.8668, 1.3434, 0.1216, 0.1017]])
    expected_gt_bboxes_3d = torch.tensor(
        [[-1.2136, 4.0206, -0.2412, 2.2493, 1.8444, 1.9245, 1.3989],
         [-2.7420, 4.5777, -0.7686, 0.5718, 0.8629, 0.9510, 1.4446],
         [0.9729, 1.9087, -0.1443, 0.6965, 1.5273, 2.0563, 2.9924]])
    expected_gt_labels_3d = np.array([0, 7, 6])
    # Positional third argument of torch.allclose is rtol.
    assert torch.allclose(gt_bboxes_3d.tensor, expected_gt_bboxes_3d, 1e-3)
    assert np.allclose(gt_labels_3d.flatten(), expected_gt_labels_3d)
    assert torch.allclose(points, expected_points, 1e-2)