test_nms_quadri.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch

from mmcv.utils import IS_CUDA_AVAILABLE


class TestNMSQuadri:

    @pytest.mark.parametrize('device', [
        'cpu',
        pytest.param(
            'cuda',
            marks=pytest.mark.skipif(
                not IS_CUDA_AVAILABLE, reason='requires CUDA support')),
    ])
    def test_ml_nms_quadri(self, device):
        from mmcv.ops import nms_quadri
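        # Each box row is 4 corner points (x1, y1, ..., x4, y4) followed by a score.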
        np_boxes = np.array([[1.0, 1.0, 3.0, 4.0, 4.0, 4.0, 4.0, 1.0, 0.7],
                             [2.0, 2.0, 3.0, 4.0, 4.0, 2.0, 3.0, 1.0, 0.8],
                             [7.0, 7.0, 8.0, 8.0, 9.0, 7.0, 8.0, 6.0, 0.5],
                             [0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0, 0.0, 0.9]],
                            dtype=np.float32)
        np_labels = np.array([1, 0, 1, 0], dtype=np.float32)

        np_expect_dets = np.array([[0., 0., 0., 2., 2., 2., 2., 0.],
                                   [2., 2., 3., 4., 4., 2., 3., 1.],
                                   [7., 7., 8., 8., 9., 7., 8., 6.]],
                                  dtype=np.float32)
        np_expect_keep_inds = np.array([3, 1, 2], dtype=np.int64)

        boxes = torch.from_numpy(np_boxes).to(device)
        labels = torch.from_numpy(np_labels).to(device)

        dets, keep_inds = nms_quadri(boxes[:, :8], boxes[:, -1], 0.3, labels)

        assert np.allclose(dets.cpu().numpy()[:, :8], np_expect_dets)
        assert np.allclose(keep_inds.cpu().numpy(), np_expect_keep_inds)

    @pytest.mark.parametrize('device', [
        'cpu',
        pytest.param(
            'cuda',
            marks=pytest.mark.skipif(
                not IS_CUDA_AVAILABLE, reason='requires CUDA support')),
    ])
    def test_nms_quadri(self, device):
        from mmcv.ops import nms_quadri
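        # Same boxes as the multi-label test above, but run without labels
        # (plain single-class NMS).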
        np_boxes = np.array([[1.0, 1.0, 3.0, 4.0, 4.0, 4.0, 4.0, 1.0, 0.7],
                             [2.0, 2.0, 3.0, 4.0, 4.0, 2.0, 3.0, 1.0, 0.8],
                             [7.0, 7.0, 8.0, 8.0, 9.0, 7.0, 8.0, 6.0, 0.5],
                             [0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0, 0.0, 0.9]],
                            dtype=np.float32)

        np_expect_dets = np.array([[0., 0., 0., 2., 2., 2., 2., 0.],
                                   [2., 2., 3., 4., 4., 2., 3., 1.],
                                   [7., 7., 8., 8., 9., 7., 8., 6.]],
                                  dtype=np.float32)
        np_expect_keep_inds = np.array([3, 1, 2], dtype=np.int64)

        boxes = torch.from_numpy(np_boxes).to(device)

        dets, keep_inds = nms_quadri(boxes[:, :8], boxes[:, -1], 0.3)
        assert np.allclose(dets.cpu().numpy()[:, :8], np_expect_dets)
        assert np.allclose(keep_inds.cpu().numpy(), np_expect_keep_inds)

    @pytest.mark.parametrize('device', [
        'cpu',
        pytest.param(
            'cuda',
            marks=pytest.mark.skipif(
                not IS_CUDA_AVAILABLE, reason='requires CUDA support')),
    ])
    def test_batched_nms(self, device):
        # test batched_nms with nms_quadri
        from mmcv.ops import batched_nms

        np_boxes = np.array([[1.0, 1.0, 3.0, 4.0, 4.0, 4.0, 4.0, 1.0, 0.7],
                             [2.0, 2.0, 3.0, 4.0, 4.0, 2.0, 3.0, 1.0, 0.8],
                             [7.0, 7.0, 8.0, 8.0, 9.0, 7.0, 8.0, 6.0, 0.5],
                             [0.0, 0.0, 0.0, 2.0, 2.0, 2.0, 2.0, 0.0, 0.9]],
                            dtype=np.float32)
        np_labels = np.array([1, 0, 1, 0], dtype=np.float32)

        np_expect_agnostic_dets = np.array([[0., 0., 0., 2., 2., 2., 2., 0.],
                                            [2., 2., 3., 4., 4., 2., 3., 1.],
                                            [7., 7., 8., 8., 9., 7., 8., 6.]],
                                           dtype=np.float32)
        np_expect_agnostic_keep_inds = np.array([3, 1, 2], dtype=np.int64)

        np_expect_dets = np.array([[0., 0., 0., 2., 2., 2., 2., 0.],
                                   [2., 2., 3., 4., 4., 2., 3., 1.],
                                   [1., 1., 3., 4., 4., 4., 4., 1.],
                                   [7., 7., 8., 8., 9., 7., 8., 6.]],
                                  dtype=np.float32)
        np_expect_keep_inds = np.array([3, 1, 0, 2], dtype=np.int64)

        nms_cfg = dict(type='nms_quadri', iou_threshold=0.3)
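        # type='nms_quadri' makes batched_nms dispatch to the quadrilateral NMS op.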

        # class_agnostic=True: labels are ignored, so NMS suppresses across classes
        boxes, keep = batched_nms(
            torch.from_numpy(np_boxes[:, :8]).to(device),
            torch.from_numpy(np_boxes[:, -1]).to(device),
            torch.from_numpy(np_labels).to(device),
            nms_cfg,
            class_agnostic=True)
        assert np.allclose(boxes.cpu().numpy()[:, :8], np_expect_agnostic_dets)
        assert np.allclose(keep.cpu().numpy(), np_expect_agnostic_keep_inds)

        # class_agnostic=False: NMS runs per class, so box 0 (label 1) is no
        # longer suppressed by box 1 (label 0)
        boxes, keep = batched_nms(
            torch.from_numpy(np_boxes[:, :8]).to(device),
            torch.from_numpy(np_boxes[:, -1]).to(device),
            torch.from_numpy(np_labels).to(device),
            nms_cfg,
            class_agnostic=False)
        assert np.allclose(boxes.cpu().numpy()[:, :8], np_expect_dets)
        assert np.allclose(keep.cpu().numpy(), np_expect_keep_inds)