# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

from mmdet3d.core.evaluation.indoor_eval import average_precision, indoor_eval


def test_indoor_eval():
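    # The box overlap computation behind indoor_eval uses GPU ops in this
    # codebase, so the test is skipped on CPU-only hosts.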
    if not torch.cuda.is_available():
        pytest.skip()
    from mmdet3d.core.bbox.structures import Box3DMode, DepthInstance3DBoxes
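    # Fake detection results for a single scan: ten predicted boxes in depth
    # coordinates, each row being (x, y, z, dx, dy, dz, yaw), together with
    # their predicted labels and confidence scores.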
    det_infos = [{
        'labels_3d':
        torch.tensor([0, 1, 2, 2, 0, 3, 1, 2, 3, 2]),
        'boxes_3d':
        DepthInstance3DBoxes(
            torch.tensor([[
                -2.4089e-03, -3.3174e+00, 4.9438e-01, 2.1668e+00, 2.8431e-01,
                1.6506e+00, 0.0000e+00
            ],
                          [
                              -3.4269e-01, -2.7565e+00, 2.8144e-02, 6.8554e-01,
                              9.6854e-01, 6.1755e-01, 0.0000e+00
                          ],
                          [
                              -3.8320e+00, -1.0646e+00, 1.7074e-01, 2.4981e-01,
                              4.4708e-01, 6.2538e-01, 0.0000e+00
                          ],
                          [
                              4.1073e-01, 3.3757e+00, 3.4311e-01, 8.0617e-01,
                              2.8679e-01, 1.6060e+00, 0.0000e+00
                          ],
                          [
                              6.1199e-01, -3.1041e+00, 4.1873e-01, 1.2310e+00,
                              4.0162e-01, 1.7303e+00, 0.0000e+00
                          ],
                          [
                              -5.9877e-01, -2.6011e+00, 1.1148e+00, 1.5704e-01,
                              7.5957e-01, 9.6930e-01, 0.0000e+00
                          ],
                          [
                              2.7462e-01, -3.0088e+00, 6.5231e-02, 8.1208e-01,
                              4.1861e-01, 3.7339e-01, 0.0000e+00
                          ],
                          [
                              -1.4704e+00, -2.0024e+00, 2.7479e-01, 1.7888e+00,
                              1.0566e+00, 1.3704e+00, 0.0000e+00
                          ],
                          [
                              8.2727e-02, -3.1160e+00, 2.5690e-01, 1.4054e+00,
                              2.0772e-01, 9.6792e-01, 0.0000e+00
                          ],
                          [
                              2.6896e+00, 1.9881e+00, 1.1566e+00, 9.9885e-02,
                              3.5713e-01, 4.5638e-01, 0.0000e+00
                          ]]),
            origin=(0.5, 0.5, 0)),
        'scores_3d':
        torch.tensor([
            1.7516e-05, 1.0167e-06, 8.4486e-07, 7.1048e-02, 6.4274e-05,
            1.5003e-07, 5.8102e-06, 1.9399e-08, 5.3126e-07, 1.8630e-09
        ])
    }]

    label2cat = {
        0: 'cabinet',
        1: 'bed',
        2: 'chair',
        3: 'sofa',
    }
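    # Ground-truth annotations for the same scan, using the same box encoding
    # as the detections above in the 'gt_boxes_upright_depth' field.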
    gt_annos = [{
        'gt_num':
        10,
        'gt_boxes_upright_depth':
        np.array([[
            -2.4089e-03, -3.3174e+00, 4.9438e-01, 2.1668e+00, 2.8431e-01,
            1.6506e+00, 0.0000e+00
        ],
                  [
                      -3.4269e-01, -2.7565e+00, 2.8144e-02, 6.8554e-01,
                      9.6854e-01, 6.1755e-01, 0.0000e+00
                  ],
                  [
                      -3.8320e+00, -1.0646e+00, 1.7074e-01, 2.4981e-01,
                      4.4708e-01, 6.2538e-01, 0.0000e+00
                  ],
                  [
                      4.1073e-01, 3.3757e+00, 3.4311e-01, 8.0617e-01,
                      2.8679e-01, 1.6060e+00, 0.0000e+00
                  ],
                  [
                      6.1199e-01, -3.1041e+00, 4.1873e-01, 1.2310e+00,
                      4.0162e-01, 1.7303e+00, 0.0000e+00
                  ],
                  [
                      -5.9877e-01, -2.6011e+00, 1.1148e+00, 1.5704e-01,
                      7.5957e-01, 9.6930e-01, 0.0000e+00
                  ],
                  [
                      2.7462e-01, -3.0088e+00, 6.5231e-02, 8.1208e-01,
                      4.1861e-01, 3.7339e-01, 0.0000e+00
                  ],
                  [
                      -1.4704e+00, -2.0024e+00, 2.7479e-01, 1.7888e+00,
                      1.0566e+00, 1.3704e+00, 0.0000e+00
                  ],
                  [
                      8.2727e-02, -3.1160e+00, 2.5690e-01, 1.4054e+00,
                      2.0772e-01, 9.6792e-01, 0.0000e+00
                  ],
                  [
                      2.6896e+00, 1.9881e+00, 1.1566e+00, 9.9885e-02,
                      3.5713e-01, 4.5638e-01, 0.0000e+00
                  ]]),
        'class':
        np.array([0, 1, 2, 0, 0, 3, 1, 3, 3, 2])
    }]

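    # Evaluate at IoU thresholds 0.25 and 0.5; the returned dict holds the
    # per-class AP plus mAP/mAR for each threshold.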
    ret_value = indoor_eval(
        gt_annos,
        det_infos, [0.25, 0.5],
        label2cat,
        box_type_3d=DepthInstance3DBoxes,
        box_mode_3d=Box3DMode.DEPTH)

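    # Expected per-class AP and the resulting mAP/mAR for the synthetic
    # detections and annotations above.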
    assert np.isclose(ret_value['cabinet_AP_0.25'], 0.666667)
    assert np.isclose(ret_value['bed_AP_0.25'], 1.0)
    assert np.isclose(ret_value['chair_AP_0.25'], 0.5)
    assert np.isclose(ret_value['mAP_0.25'], 0.708333)
    assert np.isclose(ret_value['mAR_0.25'], 0.833333)


def test_indoor_eval_less_classes():
    if not torch.cuda.is_available():
        pytest.skip()
    from mmdet3d.core.bbox.structures import Box3DMode, DepthInstance3DBoxes
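    # Detections cover only two of the three categories listed in label2cat;
    # the evaluation should still run and score the undetected class as 0.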
    det_infos = [{
        'labels_3d':
        torch.tensor([0]),
        'boxes_3d':
        DepthInstance3DBoxes(torch.tensor([[1., 1., 1., 1., 1., 1., 1.]])),
        'scores_3d':
        torch.tensor([.5])
    }, {
        'labels_3d':
        torch.tensor([1]),
        'boxes_3d':
        DepthInstance3DBoxes(torch.tensor([[1., 1., 1., 1., 1., 1., 1.]])),
        'scores_3d':
        torch.tensor([.5])
    }]

    label2cat = {0: 'cabinet', 1: 'bed', 2: 'chair'}
    gt_annos = [{
        'gt_num':
        2,
        'gt_boxes_upright_depth':
        np.array([[0., 0., 0., 1., 1., 1., 1.], [1., 1., 1., 1., 1., 1., 1.]]),
        'class':
        np.array([2, 0])
    }, {
        'gt_num':
        1,
        'gt_boxes_upright_depth':
        np.array([
            [1., 1., 1., 1., 1., 1., 1.],
        ]),
        'class':
        np.array([1])
    }]

    ret_value = indoor_eval(
        gt_annos,
        det_infos, [0.25, 0.5],
        label2cat,
        box_type_3d=DepthInstance3DBoxes,
        box_mode_3d=Box3DMode.DEPTH)

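    # 'chair' has a ground-truth box but no detection, so its AP and recall
    # are 0 and both means drop to 2 / 3.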
    assert np.isclose(ret_value['mAP_0.25'], 0.666667)
    assert np.isclose(ret_value['mAR_0.25'], 0.666667)


def test_average_precision():
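    # 11-point interpolated AP over two synthetic precision/recall curves.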
    ap = average_precision(
        np.array([[0.25, 0.5, 0.75], [0.25, 0.5, 0.75]]),
        np.array([[1., 1., 1.], [1., 1., 1.]]), '11points')
    assert abs(ap[0] - 0.06611571) < 0.001