# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import build_norm_layer
from mmcv.ops import DynamicScatter
from torch import nn

from mmdet3d.registry import MODELS
from .utils import PFNLayer, get_paddings_indicator


@MODELS.register_module()
class PillarFeatureNet(nn.Module):
    """Pillar Feature Net.

    The network prepares the pillar features and performs forward pass
    through PFNLayers.

    Args:
        in_channels (int, optional): Number of input features,
            either x, y, z or x, y, z, r. Defaults to 4.
        feat_channels (tuple, optional): Number of features in each of the
            N PFNLayers. Defaults to (64, ).
        with_distance (bool, optional): Whether to include Euclidean distance
            to points. Defaults to False.
        with_cluster_center (bool, optional): Whether to append the distance
            to the cluster center (the mean of the points in each pillar) as
            extra features. Defaults to True.
        with_voxel_center (bool, optional): Whether to append the distance
            to the pillar (voxel) center as extra features. Defaults to True.
        voxel_size (tuple[float], optional): Size of voxels.
            Defaults to (0.2, 0.2, 4).
        point_cloud_range (tuple[float], optional): Point cloud range.
            Defaults to (0, -40, -3, 70.4, 40, 1).
        norm_cfg (dict, optional): Config dict of the normalization layers.
            Defaults to dict(type='BN1d', eps=1e-3, momentum=0.01).
        mode (str, optional): The mode to gather point features. Options are
            'max' or 'avg'. Defaults to 'max'.
        legacy (bool, optional): Whether to use the original (legacy)
            behavior (True) or the new behavior (False) when computing the
            voxel-center offsets. Defaults to True.
    """

    def __init__(self,
                 in_channels=4,
                 feat_channels=(64, ),
                 with_distance=False,
                 with_cluster_center=True,
                 with_voxel_center=True,
                 voxel_size=(0.2, 0.2, 4),
                 point_cloud_range=(0, -40, -3, 70.4, 40, 1),
                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
                 mode='max',
                 legacy=True):
        super(PillarFeatureNet, self).__init__()
        assert len(feat_channels) > 0
        self.legacy = legacy
        if with_cluster_center:
            in_channels += 3
        if with_voxel_center:
            in_channels += 3
        if with_distance:
            in_channels += 1
        self._with_distance = with_distance
        self._with_cluster_center = with_cluster_center
        self._with_voxel_center = with_voxel_center
        # Create PillarFeatureNet layers
        self.in_channels = in_channels
        feat_channels = [in_channels] + list(feat_channels)
        pfn_layers = []
        for i in range(len(feat_channels) - 1):
            in_filters = feat_channels[i]
            out_filters = feat_channels[i + 1]
            if i < len(feat_channels) - 2:
                last_layer = False
            else:
                last_layer = True
            pfn_layers.append(
                PFNLayer(
                    in_filters,
                    out_filters,
                    norm_cfg=norm_cfg,
                    last_layer=last_layer,
                    mode=mode))
        self.pfn_layers = nn.ModuleList(pfn_layers)

        # Need pillar (voxel) size and x/y/z offsets to compute pillar centers
        self.vx = voxel_size[0]
        self.vy = voxel_size[1]
        self.vz = voxel_size[2]
        self.x_offset = self.vx / 2 + point_cloud_range[0]
        self.y_offset = self.vy / 2 + point_cloud_range[1]
        self.z_offset = self.vz / 2 + point_cloud_range[2]
        self.point_cloud_range = point_cloud_range

    def forward(self, features, num_points, coors, *args, **kwargs):
        """Forward function.

        Args:
            features (torch.Tensor): Point features or raw points in shape
                (N, M, C).
            num_points (torch.Tensor): Number of points in each pillar.
            coors (torch.Tensor): Coordinates of each voxel.

        Returns:
            torch.Tensor: Features of pillars.
        """
        features_ls = [features]
        # Find distance of x, y, and z from cluster center
        if self._with_cluster_center:
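            # Padded point slots are zero-filled, so summing over the point
            # dimension and dividing by the true point count yields the
            # per-pillar mean.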
            points_mean = features[:, :, :3].sum(
                dim=1, keepdim=True) / num_points.type_as(features).view(
                    -1, 1, 1)
            f_cluster = features[:, :, :3] - points_mean
            features_ls.append(f_cluster)

        # Find distance of x, y, and z from pillar center
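        # coors are assumed to follow the (batch_idx, z, y, x) convention, so
        # coors[:, 3] indexes x and coors[:, 1] indexes z.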
        dtype = features.dtype
        if self._with_voxel_center:
            if not self.legacy:
                f_center = torch.zeros_like(features[:, :, :3])
                f_center[:, :, 0] = features[:, :, 0] - (
                    coors[:, 3].to(dtype).unsqueeze(1) * self.vx +
                    self.x_offset)
                f_center[:, :, 1] = features[:, :, 1] - (
                    coors[:, 2].to(dtype).unsqueeze(1) * self.vy +
                    self.y_offset)
                f_center[:, :, 2] = features[:, :, 2] - (
                    coors[:, 1].to(dtype).unsqueeze(1) * self.vz +
                    self.z_offset)
            else:
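                # Legacy mode: f_center is a view of features[:, :, :3], so
                # the raw x/y/z columns are overwritten in place by the center
                # offsets (presumably kept for compatibility with older
                # checkpoints).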
                f_center = features[:, :, :3]
                f_center[:, :, 0] = f_center[:, :, 0] - (
                    coors[:, 3].type_as(features).unsqueeze(1) * self.vx +
                    self.x_offset)
                f_center[:, :, 1] = f_center[:, :, 1] - (
                    coors[:, 2].type_as(features).unsqueeze(1) * self.vy +
                    self.y_offset)
                f_center[:, :, 2] = f_center[:, :, 2] - (
                    coors[:, 1].type_as(features).unsqueeze(1) * self.vz +
                    self.z_offset)
            features_ls.append(f_center)

        if self._with_distance:
            points_dist = torch.norm(features[:, :, :3], 2, 2, keepdim=True)
            features_ls.append(points_dist)

        # Combine together feature decorations
        features = torch.cat(features_ls, dim=-1)
        # The feature decorations were calculated without regard to whether
        # pillar was empty. Need to ensure that
        # empty pillars remain set to zeros.
        voxel_count = features.shape[1]
        mask = get_paddings_indicator(num_points, voxel_count, axis=0)
        mask = torch.unsqueeze(mask, -1).type_as(features)
        features *= mask

        for pfn in self.pfn_layers:
            features = pfn(features, num_points)

        return features.squeeze(1)
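
# A minimal, hypothetical usage sketch for PillarFeatureNet (illustrative only;
# the shapes and values below are assumptions, not part of the original module):
#
#   pfn = PillarFeatureNet(in_channels=4, feat_channels=(64, ))
#   features = torch.rand(1000, 32, 4)      # (num_pillars, max_points, x/y/z/r)
#   num_points = torch.randint(1, 33, (1000, ))
#   coors = torch.zeros(1000, 4).int()      # (batch_idx, z, y, x) per pillar
#   pillar_feats = pfn(features, num_points, coors)  # -> (1000, 64)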


@MODELS.register_module()
class DynamicPillarFeatureNet(PillarFeatureNet):
    """Pillar Feature Net using dynamic voxelization.

    The network prepares the pillar features and performs forward pass
    through PFNLayers. The main difference is that it is used for dynamic
    voxels, each of which can contain a different, unbounded number of
    points.

    Args:
        in_channels (int, optional): Number of input features,
            either x, y, z or x, y, z, r. Defaults to 4.
        feat_channels (tuple, optional): Number of features in each of the
            N PFNLayers. Defaults to (64, ).
        with_distance (bool, optional): Whether to include Euclidean distance
            to points. Defaults to False.
        with_cluster_center (bool, optional): Whether to append the distance
            to the cluster center (the mean of the points in each pillar) as
            extra features. Defaults to True.
        with_voxel_center (bool, optional): Whether to append the distance
            to the pillar (voxel) center as extra features. Defaults to True.
        voxel_size (tuple[float], optional): Size of voxels.
            Defaults to (0.2, 0.2, 4).
        point_cloud_range (tuple[float], optional): Point cloud range.
            Defaults to (0, -40, -3, 70.4, 40, 1).
        norm_cfg (dict, optional): Config dict of the normalization layers.
            Defaults to dict(type='BN1d', eps=1e-3, momentum=0.01).
        mode (str, optional): The mode to gather point features. Options are
            'max' or 'avg'. Defaults to 'max'.
        legacy (bool, optional): Whether to use the original (legacy)
            behavior (True) or the new behavior (False) when computing the
            voxel-center offsets. Defaults to True.
    """

    def __init__(self,
                 in_channels=4,
                 feat_channels=(64, ),
                 with_distance=False,
                 with_cluster_center=True,
                 with_voxel_center=True,
                 voxel_size=(0.2, 0.2, 4),
                 point_cloud_range=(0, -40, -3, 70.4, 40, 1),
                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
                 mode='max',
                 legacy=True):
        super(DynamicPillarFeatureNet, self).__init__(
            in_channels,
            feat_channels,
            with_distance,
            with_cluster_center=with_cluster_center,
            with_voxel_center=with_voxel_center,
            voxel_size=voxel_size,
            point_cloud_range=point_cloud_range,
            norm_cfg=norm_cfg,
            mode=mode,
            legacy=legacy)
        feat_channels = [self.in_channels] + list(feat_channels)
        pfn_layers = []
        # TODO: currently only support one PFNLayer

        for i in range(len(feat_channels) - 1):
            in_filters = feat_channels[i]
            out_filters = feat_channels[i + 1]
            if i > 0:
                in_filters *= 2
            norm_name, norm_layer = build_norm_layer(norm_cfg, out_filters)
            pfn_layers.append(
                nn.Sequential(
                    nn.Linear(in_filters, out_filters, bias=False), norm_layer,
                    nn.ReLU(inplace=True)))
        self.num_pfn = len(pfn_layers)
        self.pfn_layers = nn.ModuleList(pfn_layers)
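        # DynamicScatter reduces per-point features into per-voxel features:
        # pfn_scatter pools with max unless mode is 'avg', while
        # cluster_scatter always averages to obtain per-pillar cluster centers.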
        self.pfn_scatter = DynamicScatter(voxel_size, point_cloud_range,
                                          (mode != 'max'))
        self.cluster_scatter = DynamicScatter(
            voxel_size, point_cloud_range, average_points=True)

    def map_voxel_center_to_point(self, pts_coors, voxel_mean, voxel_coors):
        """Map the centers of voxels to its corresponding points.

        Args:
            pts_coors (torch.Tensor): The coordinates of each point in shape
                (M, 4), where M is the number of points.
            voxel_mean (torch.Tensor): The mean or aggregated features of a
                voxel, shape (N, C), where N is the number of voxels.
            voxel_coors (torch.Tensor): The coordinates of each voxel.

        Returns:
            torch.Tensor: Corresponding voxel centers of each point, in shape
                (M, C), where M is the number of points.
        """
        # Step 1: scatter voxel into canvas
        # Calculate necessary things for canvas creation
        canvas_y = int(
            (self.point_cloud_range[4] - self.point_cloud_range[1]) / self.vy)
        canvas_x = int(
            (self.point_cloud_range[3] - self.point_cloud_range[0]) / self.vx)
        canvas_channel = voxel_mean.size(1)
        batch_size = pts_coors[-1, 0] + 1
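        # Assumes the coordinates are grouped by sample, so the last row holds
        # the largest batch index.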
        canvas_len = canvas_y * canvas_x * batch_size
        # Create the canvas for this sample
        canvas = voxel_mean.new_zeros(canvas_channel, canvas_len)
        # Only include non-empty pillars
        indices = (
            voxel_coors[:, 0] * canvas_y * canvas_x +
            voxel_coors[:, 2] * canvas_x + voxel_coors[:, 3])
        # Scatter the blob back to the canvas
        canvas[:, indices.long()] = voxel_mean.t()

        # Step 2: get voxel mean for each point
        voxel_index = (
            pts_coors[:, 0] * canvas_y * canvas_x +
            pts_coors[:, 2] * canvas_x + pts_coors[:, 3])
        center_per_point = canvas[:, voxel_index.long()].t()
        return center_per_point

    def forward(self, features, coors):
        """Forward function.

        Args:
            features (torch.Tensor): Point features or raw points in shape
                (N, C), where N is the number of points.
            coors (torch.Tensor): Coordinates of each point in shape (N, 4).

        Returns:
            tuple[torch.Tensor]: Features of the voxels and their coordinates.
        """
        features_ls = [features]
        # Find distance of x, y, and z from cluster center
        if self._with_cluster_center:
            voxel_mean, mean_coors = self.cluster_scatter(features, coors)
            points_mean = self.map_voxel_center_to_point(
                coors, voxel_mean, mean_coors)
            # TODO: maybe also do cluster for reflectivity
            f_cluster = features[:, :3] - points_mean[:, :3]
            features_ls.append(f_cluster)

        # Find distance of x, y, and z from pillar center
        if self._with_voxel_center:
            f_center = features.new_zeros(size=(features.size(0), 3))
            f_center[:, 0] = features[:, 0] - (
                coors[:, 3].type_as(features) * self.vx + self.x_offset)
            f_center[:, 1] = features[:, 1] - (
                coors[:, 2].type_as(features) * self.vy + self.y_offset)
            f_center[:, 2] = features[:, 2] - (
                coors[:, 1].type_as(features) * self.vz + self.z_offset)
            features_ls.append(f_center)

        if self._with_distance:
            points_dist = torch.norm(features[:, :3], 2, 1, keepdim=True)
            features_ls.append(points_dist)

        # Combine together feature decorations
        features = torch.cat(features_ls, dim=-1)
        for i, pfn in enumerate(self.pfn_layers):
            point_feats = pfn(features)
            voxel_feats, voxel_coors = self.pfn_scatter(point_feats, coors)
            if i != len(self.pfn_layers) - 1:
                # need to concat voxel feats if it is not the last pfn
                feat_per_point = self.map_voxel_center_to_point(
                    coors, voxel_feats, voxel_coors)
                features = torch.cat([point_feats, feat_per_point], dim=1)

        return voxel_feats, voxel_coors
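
# A minimal, hypothetical usage sketch for DynamicPillarFeatureNet (illustrative
# only; DynamicScatter typically requires a CUDA build of mmcv, so the tensors
# are assumed to live on GPU):
#
#   dpfn = DynamicPillarFeatureNet(in_channels=4, feat_channels=(64, )).cuda()
#   points = torch.rand(20000, 4).cuda()         # (num_points, x/y/z/r)
#   coors = torch.zeros(20000, 4).int().cuda()   # (batch_idx, z, y, x) per point
#   voxel_feats, voxel_coors = dpfn(points, coors)
#   # voxel_feats: (num_voxels, 64); voxel_coors: (num_voxels, 4)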