"official/nlp/optimization.py" did not exist on "1fb34e76c1f43dc3917445bf4cb5f8559b49941e"
test_sunrgbd_dataset.py 13 KB
Newer Older
dingchang's avatar
dingchang committed
1
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

from mmdet3d.datasets import SUNRGBDDataset


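# Helper that builds the point-cloud-only SUN RGB-D test config: data root,
# annotation file, class names, training pipeline and modality. PointSample
# keeps only 5 points so the expected tensors in the tests stay small.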
def _generate_sunrgbd_dataset_config():
    root_path = './tests/data/sunrgbd'
    ann_file = './tests/data/sunrgbd/sunrgbd_infos.pkl'
    class_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
                   'dresser', 'night_stand', 'bookshelf', 'bathtub')
    pipelines = [
        dict(
            type='LoadPointsFromFile',
            coord_type='DEPTH',
            shift_height=True,
            load_dim=6,
            use_dim=[0, 1, 2]),
        dict(type='LoadAnnotations3D'),
        dict(
            type='RandomFlip3D',
            sync_2d=False,
            flip_ratio_bev_horizontal=0.5,
        ),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.523599, 0.523599],
            scale_ratio_range=[0.85, 1.15],
            shift_height=True),
        dict(type='PointSample', num_points=5),
        dict(type='DefaultFormatBundle3D', class_names=class_names),
        dict(
            type='Collect3D',
            keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'],
            meta_keys=[
                'file_name', 'pcd_horizontal_flip', 'sample_idx',
                'pcd_scale_factor', 'pcd_rotation'
            ]),
    ]
    modality = dict(use_lidar=True, use_camera=False)
    return root_path, ann_file, class_names, pipelines, modality


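# Multi-modality variant of the config above: it additionally loads the RGB
# image and its 2D annotations, applies the standard image transforms
# (resize, flip, normalize, pad) and enables the camera modality.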
def _generate_sunrgbd_multi_modality_dataset_config():
    root_path = './tests/data/sunrgbd'
    ann_file = './tests/data/sunrgbd/sunrgbd_infos.pkl'
    class_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
                   'dresser', 'night_stand', 'bookshelf', 'bathtub')
    img_norm_cfg = dict(
        mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
    pipelines = [
        dict(
            type='LoadPointsFromFile',
            coord_type='DEPTH',
            shift_height=True,
            load_dim=6,
            use_dim=[0, 1, 2]),
        dict(type='LoadImageFromFile'),
        dict(type='LoadAnnotations3D'),
        dict(type='LoadAnnotations', with_bbox=True),
        dict(type='Resize', img_scale=(1333, 600), keep_ratio=True),
        dict(type='RandomFlip', flip_ratio=0.0),
        dict(type='Normalize', **img_norm_cfg),
        dict(type='Pad', size_divisor=32),
        dict(
            type='RandomFlip3D',
            sync_2d=False,
            flip_ratio_bev_horizontal=0.5,
        ),
        dict(
            type='GlobalRotScaleTrans',
            rot_range=[-0.523599, 0.523599],
            scale_ratio_range=[0.85, 1.15],
            shift_height=True),
        dict(type='PointSample', num_points=5),
        dict(type='DefaultFormatBundle3D', class_names=class_names),
        dict(
            type='Collect3D',
            keys=[
                'img', 'gt_bboxes', 'gt_labels', 'points', 'gt_bboxes_3d',
                'gt_labels_3d'
            ])
    ]
    modality = dict(use_lidar=True, use_camera=True)
    return root_path, ann_file, class_names, pipelines, modality


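# Fixing the NumPy seed makes the random flip / rotation / scaling / point
# sampling in the pipeline deterministic, so the hard-coded expected tensors
# below stay valid.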
def test_getitem():
    np.random.seed(0)
    root_path, ann_file, class_names, pipelines, modality = \
        _generate_sunrgbd_dataset_config()

    sunrgbd_dataset = SUNRGBDDataset(
        root_path, ann_file, pipelines, modality=modality)
    data = sunrgbd_dataset[0]
    points = data['points']._data
    gt_bboxes_3d = data['gt_bboxes_3d']._data
    gt_labels_3d = data['gt_labels_3d']._data
    file_name = data['img_metas']._data['file_name']
    pcd_horizontal_flip = data['img_metas']._data['pcd_horizontal_flip']
    pcd_scale_factor = data['img_metas']._data['pcd_scale_factor']
    pcd_rotation = data['img_metas']._data['pcd_rotation']
    sample_idx = data['img_metas']._data['sample_idx']
    pcd_rotation_expected = np.array([[0.99889565, 0.04698427, 0.],
                                      [-0.04698427, 0.99889565, 0.],
                                      [0., 0., 1.]])
    assert file_name == './tests/data/sunrgbd/points/000001.bin'
    assert pcd_horizontal_flip is False
    assert abs(pcd_scale_factor - 0.9770964398016714) < 1e-5
    assert np.allclose(pcd_rotation, pcd_rotation_expected, 1e-3)
    assert sample_idx == 1
    expected_points = torch.tensor([[-0.9904, 1.2596, 0.1105, 0.0905],
                                    [-0.9948, 1.2758, 0.0437, 0.0238],
                                    [-0.9866, 1.2641, 0.0504, 0.0304],
                                    [-0.9915, 1.2586, 0.1265, 0.1065],
                                    [-0.9890, 1.2561, 0.1216, 0.1017]])
    expected_gt_bboxes_3d = torch.tensor(
        [[0.8308, 4.1168, -1.2035, 2.2493, 1.8444, 1.9245, 1.6486],
         [2.3002, 4.8149, -1.2442, 0.5718, 0.8629, 0.9510, 1.6030],
         [-1.1477, 1.8090, -1.1725, 0.6965, 1.5273, 2.0563, 0.0552]])
    expected_gt_labels = np.array([0, 7, 6])
    original_classes = sunrgbd_dataset.CLASSES

    assert torch.allclose(points, expected_points, 1e-2)
    assert torch.allclose(gt_bboxes_3d.tensor, expected_gt_bboxes_3d, 1e-3)
    assert np.all(gt_labels_3d.numpy() == expected_gt_labels)
    assert original_classes == class_names

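    # Custom classes may be given as a list, a tuple or a path to a text file
    # with one class name per line; each form should override the default
    # CLASSES of the dataset.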
    SUNRGBD_dataset = SUNRGBDDataset(
        root_path, ann_file, pipeline=None, classes=['bed', 'table'])
    assert SUNRGBD_dataset.CLASSES != original_classes
    assert SUNRGBD_dataset.CLASSES == ['bed', 'table']

    SUNRGBD_dataset = SUNRGBDDataset(
        root_path, ann_file, pipeline=None, classes=('bed', 'table'))
    assert SUNRGBD_dataset.CLASSES != original_classes
    assert SUNRGBD_dataset.CLASSES == ('bed', 'table')

    import tempfile
    tmp_file = tempfile.NamedTemporaryFile()
    with open(tmp_file.name, 'w') as f:
        f.write('bed\ntable\n')

    SUNRGBD_dataset = SUNRGBDDataset(
        root_path, ann_file, pipeline=None, classes=tmp_file.name)
    assert SUNRGBD_dataset.CLASSES != original_classes
    assert SUNRGBD_dataset.CLASSES == ['bed', 'table']

    # test multi-modality SUN RGB-D dataset
    np.random.seed(0)
    root_path, ann_file, class_names, multi_modality_pipelines, modality = \
        _generate_sunrgbd_multi_modality_dataset_config()
    sunrgbd_dataset = SUNRGBDDataset(
        root_path, ann_file, multi_modality_pipelines, modality=modality)
    data = sunrgbd_dataset[0]

    points = data['points']._data
    gt_bboxes_3d = data['gt_bboxes_3d']._data
    gt_labels_3d = data['gt_labels_3d']._data
    img = data['img']._data
    depth2img = data['img_metas']._data['depth2img']

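    # Reconstruct the expected depth2img matrix from the calibration of test
    # sample 000001: the transposed depth-to-camera rotation is re-ordered
    # into the camera axis convention and then composed with the intrinsics.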
    expected_rt_mat = np.array([[0.97959, 0.012593, -0.20061],
                                [0.012593, 0.99223, 0.12377],
                                [0.20061, -0.12377, 0.97182]])
    expected_k_mat = np.array([[529.5, 0., 0.], [0., 529.5, 0.],
                               [365., 265., 1.]])
    rt_mat = np.array([[1, 0, 0], [0, 0, -1], [0, 1, 0]
                       ]) @ expected_rt_mat.transpose(1, 0)
    expected_depth2img = expected_k_mat @ rt_mat

    assert torch.allclose(points, expected_points, 1e-2)
    assert torch.allclose(gt_bboxes_3d.tensor, expected_gt_bboxes_3d, 1e-3)
    assert np.all(gt_labels_3d.numpy() == expected_gt_labels)
    assert img.shape[:] == (3, 608, 832)
    assert np.allclose(depth2img, expected_depth2img)


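# Indoor mAP evaluation on a single frame whose predicted boxes nearly
# coincide with the ground truth, so AP@0.25 for the matched classes is
# expected to be close to 1.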
def test_evaluate():
    if not torch.cuda.is_available():
        pytest.skip()
    from mmdet3d.core.bbox.structures import DepthInstance3DBoxes
    root_path, ann_file, _, pipelines, modality = \
        _generate_sunrgbd_dataset_config()
    sunrgbd_dataset = SUNRGBDDataset(
        root_path, ann_file, pipelines, modality=modality)
    results = []
    pred_boxes = dict()
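    # The predictions below are small perturbations of the ground-truth boxes
    # of sample 000001, with matching labels.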
    pred_boxes['boxes_3d'] = DepthInstance3DBoxes(
        torch.tensor(
            [[1.0473, 4.1687, -1.2317, 2.3021, 1.8876, 1.9696, 1.6956],
             [2.5831, 4.8117, -1.2733, 0.5852, 0.8832, 0.9733, 1.6500],
             [-1.0864, 1.9045, -1.2000, 0.7128, 1.5631, 2.1045, 0.1022]]))
    pred_boxes['labels_3d'] = torch.tensor([0, 7, 6])
    pred_boxes['scores_3d'] = torch.tensor([0.5, 1.0, 1.0])
    results.append(pred_boxes)
    metric = [0.25, 0.5]
    ap_dict = sunrgbd_dataset.evaluate(results, metric)
    bed_precision_25 = ap_dict['bed_AP_0.25']
    dresser_precision_25 = ap_dict['dresser_AP_0.25']
    night_stand_precision_25 = ap_dict['night_stand_AP_0.25']
    assert abs(bed_precision_25 - 1) < 0.01
    assert abs(dresser_precision_25 - 1) < 0.01
    assert abs(night_stand_precision_25 - 1) < 0.01


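# The show() tests only check that the visualization files (.obj point clouds
# and boxes, plus .png projections in the multi-modality case) are written to
# the output directory; nothing is displayed (show=False).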
def test_show():
    import mmcv
    import tempfile
    from os import path as osp

    from mmdet3d.core.bbox import DepthInstance3DBoxes
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    root_path, ann_file, class_names, pipelines, modality = \
        _generate_sunrgbd_dataset_config()
    sunrgbd_dataset = SUNRGBDDataset(
        root_path, ann_file, pipelines, modality=modality)
    boxes_3d = DepthInstance3DBoxes(
        torch.tensor(
            [[1.1500, 4.2614, -1.0669, 1.3219, 2.1593, 1.0267, 1.6473],
             [-0.9583, 2.1916, -1.0881, 0.6213, 1.3022, 1.6275, -3.0720],
             [2.5697, 4.8152, -1.1157, 0.5421, 0.7019, 0.7896, 1.6712],
             [0.7283, 2.5448, -1.0356, 0.7691, 0.9056, 0.5771, 1.7121],
             [-0.9860, 3.2413, -1.2349, 0.5110, 0.9940, 1.1245, 0.3295]]))
    scores_3d = torch.tensor(
        [1.5280e-01, 1.6682e-03, 6.2811e-04, 1.2860e-03, 9.4229e-06])
    labels_3d = torch.tensor([0, 0, 0, 0, 0])
    result = dict(boxes_3d=boxes_3d, scores_3d=scores_3d, labels_3d=labels_3d)
    results = [result]
    sunrgbd_dataset.show(results, temp_dir, show=False)
    pts_file_path = osp.join(temp_dir, '000001', '000001_points.obj')
    gt_file_path = osp.join(temp_dir, '000001', '000001_gt.obj')
    pred_file_path = osp.join(temp_dir, '000001', '000001_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()

    # test show with pipeline
    eval_pipeline = [
        dict(
            type='LoadPointsFromFile',
            coord_type='DEPTH',
            shift_height=True,
            load_dim=6,
            use_dim=[0, 1, 2]),
        dict(
            type='DefaultFormatBundle3D',
            class_names=class_names,
            with_label=False),
        dict(type='Collect3D', keys=['points'])
    ]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    sunrgbd_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
    pts_file_path = osp.join(temp_dir, '000001', '000001_points.obj')
    gt_file_path = osp.join(temp_dir, '000001', '000001_gt.obj')
    pred_file_path = osp.join(temp_dir, '000001', '000001_pred.obj')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    tmp_dir.cleanup()

    # test multi-modality show
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    root_path, ann_file, class_names, multi_modality_pipelines, modality = \
        _generate_sunrgbd_multi_modality_dataset_config()
    sunrgbd_dataset = SUNRGBDDataset(
        root_path, ann_file, multi_modality_pipelines, modality=modality)
    sunrgbd_dataset.show(results, temp_dir, False, multi_modality_pipelines)
    pts_file_path = osp.join(temp_dir, '000001', '000001_points.obj')
    gt_file_path = osp.join(temp_dir, '000001', '000001_gt.obj')
    pred_file_path = osp.join(temp_dir, '000001', '000001_pred.obj')
    img_file_path = osp.join(temp_dir, '000001', '000001_img.png')
    img_pred_path = osp.join(temp_dir, '000001', '000001_pred.png')
    img_gt_file = osp.join(temp_dir, '000001', '000001_gt.png')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    mmcv.check_file_exist(img_file_path)
    mmcv.check_file_exist(img_pred_path)
    mmcv.check_file_exist(img_gt_file)
    tmp_dir.cleanup()

    # test multi-modality show with pipeline
    eval_pipeline = [
        dict(type='LoadImageFromFile'),
        dict(
            type='LoadPointsFromFile',
            coord_type='DEPTH',
            shift_height=True,
            load_dim=6,
            use_dim=[0, 1, 2]),
        dict(
            type='DefaultFormatBundle3D',
            class_names=class_names,
            with_label=False),
        dict(type='Collect3D', keys=['points', 'img'])
    ]
    tmp_dir = tempfile.TemporaryDirectory()
    temp_dir = tmp_dir.name
    sunrgbd_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
    pts_file_path = osp.join(temp_dir, '000001', '000001_points.obj')
    gt_file_path = osp.join(temp_dir, '000001', '000001_gt.obj')
    pred_file_path = osp.join(temp_dir, '000001', '000001_pred.obj')
    img_file_path = osp.join(temp_dir, '000001', '000001_img.png')
    img_pred_path = osp.join(temp_dir, '000001', '000001_pred.png')
    img_gt_file = osp.join(temp_dir, '000001', '000001_gt.png')
    mmcv.check_file_exist(pts_file_path)
    mmcv.check_file_exist(gt_file_path)
    mmcv.check_file_exist(pred_file_path)
    mmcv.check_file_exist(img_file_path)
    mmcv.check_file_exist(img_pred_path)
    mmcv.check_file_exist(img_gt_file)
    tmp_dir.cleanup()