# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmcv.ops import grouping_operation
from mmcv.utils import IS_CUDA_AVAILABLE, IS_NPU_AVAILABLE
@pytest.mark.parametrize('device', [
    pytest.param(
        'cuda',
        marks=pytest.mark.skipif(
            not IS_CUDA_AVAILABLE, reason='requires CUDA support')),
    pytest.param(
        'npu',
        marks=pytest.mark.skipif(
            not IS_NPU_AVAILABLE, reason='requires NPU support'))
])
@pytest.mark.parametrize('dtype', [torch.half, torch.float, torch.double])
def test_grouping_points(dtype, device):
    """Check batched ``grouping_operation`` against a hand-derived result."""
    # (B=2, npoint=6, nsample=3) group indices into the point dimension.
    indices = torch.tensor(
        [[[0, 0, 0], [3, 3, 3], [8, 8, 8], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
         [[0, 0, 0], [6, 6, 6], [9, 9, 9], [0, 0, 0], [0, 0, 0], [0, 0, 0]]],
        dtype=torch.int32).to(device)
    # (B=2, C=3, N=10) input features.
    feats = torch.tensor(
        [[[0.5798, -0.7981, -0.9280, -1.3311, 1.3687, 0.9277, -0.4164,
           -1.8274, 0.9268, 0.8414],
          [5.4247, 1.5113, 2.3944, 1.4740, 5.0300, 5.1030, 1.9360, 2.1939,
           2.1581, 3.4666],
          [-1.6266, -1.0281, -1.0393, -1.6931, -1.3982, -0.5732, -1.0830,
           -1.7561, -1.6786, -1.6967]],
         [[-0.0380, -0.1880, -1.5724, 0.6905, -0.3190, 0.7798, -0.3693,
           -0.9457, -0.2942, -1.8527],
          [1.1773, 1.5009, 2.6399, 5.9242, 1.0962, 2.7346, 6.0865, 1.5555,
           4.3303, 2.8229],
          [-0.6646, -0.6870, -0.1125, -0.2224, -0.3445, -1.4049, 0.4990,
           -0.7037, -0.9924, 0.0386]]],
        dtype=dtype).to(device)
    # All three samples of a group share one gathered value, so the reference
    # is a (B, C, npoint) table of those values expanded over nsample.
    gathered = torch.tensor(
        [[[0.5798, -1.3311, 0.9268, 0.5798, 0.5798, 0.5798],
          [5.4247, 1.4740, 2.1581, 5.4247, 5.4247, 5.4247],
          [-1.6266, -1.6931, -1.6786, -1.6266, -1.6266, -1.6266]],
         [[-0.0380, -0.3693, -1.8527, -0.0380, -0.0380, -0.0380],
          [1.1773, 6.0865, 2.8229, 1.1773, 1.1773, 1.1773],
          [-0.6646, 0.4990, 0.0386, -0.6646, -0.6646, -0.6646]]],
        dtype=dtype).to(device)
    expected = gathered.unsqueeze(-1).expand(-1, -1, -1, 3)

    grouped = grouping_operation(feats, indices)
    assert torch.allclose(grouped, expected)


@pytest.mark.parametrize('device', [
    pytest.param(
        'cuda',
        marks=pytest.mark.skipif(
            not IS_CUDA_AVAILABLE, reason='requires CUDA support')),
    pytest.param(
        'npu',
        marks=pytest.mark.skipif(
            not IS_NPU_AVAILABLE, reason='requires NPU support'))
])
@pytest.mark.parametrize('dtype', [torch.half, torch.float, torch.double])
def test_stack_grouping_points(dtype, device):
    """Check stacked-batch ``grouping_operation`` against a hand-derived
    result."""
    if device == 'npu' and dtype == torch.double:
        return
    # (M=12, nsample=3) group indices; with indices_batch_cnt = [6, 6] the
    # first six rows address batch 0 and the last six address batch 1.
    indices = torch.tensor(
        [[0, 0, 0], [3, 3, 3], [8, 8, 8], [1, 1, 1], [0, 0, 0], [2, 2, 2],
         [0, 0, 0], [6, 6, 6], [9, 9, 9], [0, 0, 0], [1, 1, 1], [0, 0, 0]],
        dtype=torch.int32).to(device)
    # (N=6, C=10) stacked features; features_batch_cnt = [3, 3] puts the
    # first three rows in batch 0 and the last three in batch 1.
    feats = torch.tensor(
        [[0.5798, -0.7981, -0.9280, -1.3311, 1.3687, 0.9277, -0.4164,
          -1.8274, 0.9268, 0.8414],
         [5.4247, 1.5113, 2.3944, 1.4740, 5.0300, 5.1030, 1.9360, 2.1939,
          2.1581, 3.4666],
         [-1.6266, -1.0281, -1.0393, -1.6931, -1.3982, -0.5732, -1.0830,
          -1.7561, -1.6786, -1.6967],
         [-0.0380, -0.1880, -1.5724, 0.6905, -0.3190, 0.7798, -0.3693,
          -0.9457, -0.2942, -1.8527],
         [1.1773, 1.5009, 2.6399, 5.9242, 1.0962, 2.7346, 6.0865, 1.5555,
          4.3303, 2.8229],
         [-0.6646, -0.6870, -0.1125, -0.2224, -0.3445, -1.4049, 0.4990,
          -0.7037, -0.9924, 0.0386]],
        dtype=dtype).to(device)
    feats_batch_cnt = torch.tensor([3, 3], dtype=torch.int32).to(device)
    indices_batch_cnt = torch.tensor([6, 6], dtype=torch.int32).to(device)

    grouped = grouping_operation(feats, indices, feats_batch_cnt,
                                 indices_batch_cnt)

    # Every sample of a group repeats one gathered feature row; an index that
    # exceeds its batch's feature count yields a zero row. The reference is
    # therefore an (M, C) row table expanded over nsample.
    expected_rows = [
        [0.5798, -0.7981, -0.9280, -1.3311, 1.3687, 0.9277, -0.4164, -1.8274,
         0.9268, 0.8414],
        [0.0] * 10,  # index 3 out of range for batch 0 (only 3 rows)
        [0.0] * 10,  # index 8 out of range for batch 0
        [5.4247, 1.5113, 2.3944, 1.4740, 5.0300, 5.1030, 1.9360, 2.1939,
         2.1581, 3.4666],
        [0.5798, -0.7981, -0.9280, -1.3311, 1.3687, 0.9277, -0.4164, -1.8274,
         0.9268, 0.8414],
        [-1.6266, -1.0281, -1.0393, -1.6931, -1.3982, -0.5732, -1.0830,
         -1.7561, -1.6786, -1.6967],
        [-0.0380, -0.1880, -1.5724, 0.6905, -0.3190, 0.7798, -0.3693,
         -0.9457, -0.2942, -1.8527],
        [0.0] * 10,  # index 6 out of range for batch 1
        [0.0] * 10,  # index 9 out of range for batch 1
        [-0.0380, -0.1880, -1.5724, 0.6905, -0.3190, 0.7798, -0.3693,
         -0.9457, -0.2942, -1.8527],
        [1.1773, 1.5009, 2.6399, 5.9242, 1.0962, 2.7346, 6.0865, 1.5555,
         4.3303, 2.8229],
        [-0.0380, -0.1880, -1.5724, 0.6905, -0.3190, 0.7798, -0.3693,
         -0.9457, -0.2942, -1.8527],
    ]
    expected = torch.tensor(
        expected_rows, dtype=dtype).to(device).unsqueeze(-1).expand(-1, -1, 3)
    assert torch.allclose(grouped, expected)