import os.path as osp

import mmcv
import numpy as np
import torch

from mmdet3d.core.bbox import DepthInstance3DBoxes
from mmdet3d.datasets.pipelines import Compose

def test_scannet_pipeline():
    """Run the full indoor data pipeline (load -> annotate -> sample ->
    augment -> format -> collect) on the ScanNet toy data under
    ``tests/data/scannet`` and compare against pre-computed values.

    ``np.random.seed(0)`` plus ``flip_ratio_* = 1.0`` make every random
    augmentation deterministic so exact expected arrays can be asserted.
    """
    class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
                   'window', 'bookshelf', 'picture', 'counter', 'desk',
                   'curtain', 'refrigerator', 'showercurtrain', 'toilet',
                   'sink', 'bathtub', 'garbagebin')

    np.random.seed(0)
    pipelines = [
        dict(
            type='LoadPointsFromFile',
            shift_height=True,
            load_dim=6,
            use_dim=[0, 1, 2]),
        dict(
            type='LoadAnnotations3D',
            with_bbox_3d=True,
            with_label_3d=True,
            with_mask_3d=True,
            with_seg_3d=True),
        # Sample only 5 points so the expected arrays stay tiny.
        dict(type='IndoorPointSample', num_points=5),
        # Ratio 1.0 forces both flips: deterministic for the test.
        dict(type='IndoorFlipData', flip_ratio_yz=1.0, flip_ratio_xz=1.0),
        dict(
            type='IndoorGlobalRotScale',
            shift_height=True,
            rot_range=[-1 / 36, 1 / 36],
            scale_range=None),
        dict(type='DefaultFormatBundle3D', class_names=class_names),
        dict(
            type='Collect3D',
            keys=[
                'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask',
                'pts_instance_mask'
            ]),
    ]
    pipeline = Compose(pipelines)
    info = mmcv.load('./tests/data/scannet/scannet_infos.pkl')[0]

    # Build the input dict the pipeline expects from the info record.
    results = dict()
    data_path = './tests/data/scannet'
    results['pts_filename'] = osp.join(data_path, info['pts_path'])
    if info['annos']['gt_num'] != 0:
        scannet_gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
            np.float32)
        # np.int64 replaces np.long, which was removed in NumPy 1.24.
        scannet_gt_labels_3d = info['annos']['class'].astype(np.int64)
    else:
        # No ground truth: fall back to a single dummy box/label.
        scannet_gt_bboxes_3d = np.zeros((1, 6), dtype=np.float32)
        scannet_gt_labels_3d = np.zeros((1, ), dtype=np.int64)
    results['ann_info'] = dict()
    results['ann_info']['pts_instance_mask_path'] = osp.join(
        data_path, info['pts_instance_mask_path'])
    results['ann_info']['pts_semantic_mask_path'] = osp.join(
        data_path, info['pts_semantic_mask_path'])
    # ScanNet boxes are axis-aligned depth-frame boxes (no yaw).
    results['ann_info']['gt_bboxes_3d'] = DepthInstance3DBoxes(
        scannet_gt_bboxes_3d, box_dim=6, with_yaw=False)
    results['ann_info']['gt_labels_3d'] = scannet_gt_labels_3d

    results['bbox3d_fields'] = []
    results['pts_mask_fields'] = []
    results['pts_seg_fields'] = []

    results = pipeline(results)

    # Collect3D wraps everything in DataContainers; unwrap via ._data.
    points = results['points']._data
    gt_bboxes_3d = results['gt_bboxes_3d']._data
    gt_labels_3d = results['gt_labels_3d']._data
    pts_semantic_mask = results['pts_semantic_mask']._data
    pts_instance_mask = results['pts_instance_mask']._data
    expected_points = np.array(
        [[-2.9078157, -1.9569951, 2.3543026, 2.389488],
         [-0.71360034, -3.4359822, 2.1330001, 2.1681855],
         [-1.332374, 1.474838, -0.04405887, -0.00887359],
         [2.1336637, -1.3265059, -0.02880373, 0.00638155],
         [0.43895668, -3.0259454, 1.5560012, 1.5911865]])
    expected_gt_bboxes_3d = torch.tensor(
        [[-1.5005, -3.5126, 1.8565, 1.7457, 0.2415, 0.5724, 0.0000],
         [-2.8849, 3.4962, 1.5268, 0.6617, 0.1743, 0.6715, 0.0000],
         [-1.1586, -2.1924, 0.6165, 0.5557, 2.5376, 1.2145, 0.0000],
         [-2.9305, -2.4856, 0.9722, 0.6270, 1.8462, 0.2870, 0.0000],
         [3.3115, -0.0048, 1.0712, 0.4619, 3.8605, 2.1603, 0.0000]])
    expected_gt_labels_3d = np.array([
        6, 6, 4, 9, 11, 11, 10, 0, 15, 17, 17, 17, 3, 12, 4, 4, 14, 1, 0, 0, 0,
        0, 0, 0, 5, 5, 5
    ])
    expected_pts_semantic_mask = np.array([3, 1, 2, 2, 15])
    expected_pts_instance_mask = np.array([44, 22, 10, 10, 57])
    assert np.allclose(points, expected_points)
    # 1e-2 is passed positionally as rtol to torch.allclose.
    assert torch.allclose(gt_bboxes_3d.tensor[:5, :], expected_gt_bboxes_3d,
                          1e-2)
    assert np.all(gt_labels_3d.numpy() == expected_gt_labels_3d)
    assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask)
    assert np.all(pts_instance_mask.numpy() == expected_pts_instance_mask)


def test_sunrgbd_pipeline():
    """Run the indoor data pipeline on the SUN RGB-D toy data under
    ``tests/data/sunrgbd`` and compare against pre-computed values.

    ``np.random.seed(0)`` plus ``flip_ratio_yz = 1.0`` make the random
    augmentations deterministic so exact expected arrays can be asserted.
    """
    class_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
                   'dresser', 'night_stand', 'bookshelf', 'bathtub')
    np.random.seed(0)
    pipelines = [
        dict(
            type='LoadPointsFromFile',
            shift_height=True,
            load_dim=6,
            use_dim=[0, 1, 2]),
        dict(type='LoadAnnotations3D'),
        # Ratio 1.0 forces the flip: deterministic for the test.
        dict(type='IndoorFlipData', flip_ratio_yz=1.0),
        dict(
            type='IndoorGlobalRotScale',
            shift_height=True,
            rot_range=[-1 / 6, 1 / 6],
            scale_range=[0.85, 1.15]),
        # Sample only 5 points so the expected arrays stay tiny.
        dict(type='IndoorPointSample', num_points=5),
        dict(type='DefaultFormatBundle3D', class_names=class_names),
        dict(
            type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']),
    ]
    pipeline = Compose(pipelines)
    results = dict()
    info = mmcv.load('./tests/data/sunrgbd/sunrgbd_infos.pkl')[0]
    data_path = './tests/data/sunrgbd'
    results['pts_filename'] = osp.join(data_path, info['pts_path'])

    if info['annos']['gt_num'] != 0:
        gt_bboxes_3d = info['annos']['gt_boxes_upright_depth'].astype(
            np.float32)
        # np.int64 replaces np.long, which was removed in NumPy 1.24.
        gt_labels_3d = info['annos']['class'].astype(np.int64)
    else:
        # No ground truth: fall back to a single dummy box/label.
        gt_bboxes_3d = np.zeros((1, 7), dtype=np.float32)
        gt_labels_3d = np.zeros((1, ), dtype=np.int64)

    # prepare input of pipeline
    results['ann_info'] = dict()
    results['ann_info']['gt_bboxes_3d'] = DepthInstance3DBoxes(gt_bboxes_3d)
    results['ann_info']['gt_labels_3d'] = gt_labels_3d
    results['bbox3d_fields'] = []
    results['pts_mask_fields'] = []
    results['pts_seg_fields'] = []

    results = pipeline(results)

    # Collect3D wraps everything in DataContainers; unwrap via ._data.
    points = results['points']._data
    gt_bboxes_3d = results['gt_bboxes_3d']._data
    gt_labels_3d = results['gt_labels_3d']._data
    expected_points = np.array([[0.6512, 1.5781, 0.0710, 0.0499],
                                [0.6473, 1.5701, 0.0657, 0.0447],
                                [0.6464, 1.5635, 0.0826, 0.0616],
                                [0.6453, 1.5603, 0.0849, 0.0638],
                                [0.6488, 1.5786, 0.0461, 0.0251]])
    expected_gt_bboxes_3d = torch.tensor(
        [[-2.0125, 3.9473, -0.2545, 2.3730, 1.9458, 2.0303, 1.2206],
         [-3.7037, 4.2396, -0.8109, 0.6032, 0.9104, 1.0033, 1.2663],
         [0.6529, 2.1638, -0.1523, 0.7348, 1.6113, 2.1694, 2.8140]])
    expected_gt_labels_3d = np.array([0, 7, 6])
    # 1e-3 / 1e-2 are passed positionally as rtol.
    assert torch.allclose(gt_bboxes_3d.tensor, expected_gt_bboxes_3d, 1e-3)
    assert np.allclose(gt_labels_3d.flatten(), expected_gt_labels_3d)
    assert np.allclose(points, expected_points, 1e-2)