import torch
import torch.nn as nn

import mmdet3d.ops.spconv as spconv
from mmdet3d.ops import SparseBasicBlock
from mmdet.ops import build_norm_layer
from ..registry import MIDDLE_ENCODERS


@MIDDLE_ENCODERS.register_module
class SparseUnet(nn.Module):

    def __init__(self,
                 in_channels,
                 output_shape,
                 pre_act=False,
                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
                 base_channels=16,
                 output_channels=128,
                 encoder_channels=((16, ), (32, 32, 32), (64, 64, 64), (64, 64,
                                                                        64)),
                 encoder_paddings=((1, ), (1, 1, 1), (1, 1, 1), ((0, 1, 1), 1,
                                                                 1)),
                 decoder_channels=((64, 64, 64), (64, 64, 32), (32, 32, 16),
                                   (16, 16, 16)),
                 decoder_paddings=((1, 0), (1, 0), (0, 0), (0, 1))):
        """SparseUnet for PartA^2.

        See https://arxiv.org/abs/1907.03670 for more details.

        Args:
            in_channels (int): the number of input channels
            output_shape (list[int]): the shape of output tensor
            pre_act (bool): use pre_act_block or post_act_block
            norm_cfg (dict): config of normalization layer
            base_channels (int): out channels for conv_input layer
            output_channels (int): out channels for conv_out layer
            encoder_channels (tuple[tuple[int]]):
                conv channels of each encode block
            encoder_paddings (tuple[tuple[int]]): paddings of each encode block
            decoder_channels (tuple[tuple[int]]):
                conv channels of each decode block
            decoder_paddings (tuple[tuple[int]]): paddings of each decode block
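
        Example:
            >>> # a hypothetical KITTI-style configuration; the ordering of
            >>> # ``output_shape`` is assumed to follow the voxelization
            >>> # config of the detector, not taken from this file
            >>> self = SparseUnet(4, output_shape=[41, 1600, 1408])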
        """
        super().__init__()
        self.sparse_shape = output_shape
        self.output_shape = output_shape
        self.in_channels = in_channels
        self.pre_act = pre_act
        self.base_channels = base_channels
        self.output_channels = output_channels
        self.encoder_channels = encoder_channels
        self.encoder_paddings = encoder_paddings
        self.decoder_channels = decoder_channels
        self.decoder_paddings = decoder_paddings
        self.stage_num = len(self.encoder_channels)
        # spconv initializes all weights on its own

        if pre_act:
            # TODO: use ConvModule to encapsulate
            self.conv_input = spconv.SparseSequential(
                spconv.SubMConv3d(
                    in_channels,
                    self.base_channels,
                    3,
                    padding=1,
                    bias=False,
                    indice_key='subm1'), )
            make_block = self.pre_act_block
        else:
            self.conv_input = spconv.SparseSequential(
                spconv.SubMConv3d(
                    in_channels,
                    self.base_channels,
                    3,
                    padding=1,
                    bias=False,
                    indice_key='subm1'),
                build_norm_layer(norm_cfg, self.base_channels)[1],
                nn.ReLU(),
            )
            make_block = self.post_act_block

        encoder_out_channels = self.make_encoder_layers(
            make_block, norm_cfg, self.base_channels)
        self.make_decoder_layers(make_block, norm_cfg, encoder_out_channels)

        self.conv_out = spconv.SparseSequential(
            # [200, 176, 5] -> [200, 176, 2]
            spconv.SparseConv3d(
                encoder_out_channels,
                self.output_channels, (3, 1, 1),
                stride=(2, 1, 1),
                padding=0,
                bias=False,
                indice_key='spconv_down2'),
            build_norm_layer(norm_cfg, self.output_channels)[1],
            nn.ReLU(),
        )

    def forward(self, voxel_features, coors, batch_size):
        """Forward of SparseUnet.

        Args:
            voxel_features (torch.float32): shape [N, C]
            coors (torch.int32): shape [N, 4](batch_idx, z_idx, y_idx, x_idx)
            batch_size (int): batch size

        Returns:
            dict: backbone features
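
        Example:
            >>> # a minimal sketch, assuming a CUDA build of spconv and that
            >>> # ``self`` was built with ``in_channels=4`` and
            >>> # ``output_shape=[41, 1600, 1408]`` and moved to the GPU
            >>> voxel_features = torch.rand(1000, 4).cuda()
            >>> coors = torch.zeros(1000, 4, dtype=torch.int32)
            >>> coors[:, 1] = torch.randint(0, 41, (1000, ))  # z index
            >>> coors[:, 2] = torch.randint(0, 1600, (1000, ))  # y index
            >>> coors[:, 3] = torch.randint(0, 1408, (1000, ))  # x index
            >>> ret = self(voxel_features, coors.cuda(), batch_size=1)
            >>> ret['spatial_features'].shape  # (N, C * D, H, W)
            >>> ret['seg_features'].shape  # per-voxel features, (M, 16)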
        """
        coors = coors.int()
        input_sp_tensor = spconv.SparseConvTensor(voxel_features, coors,
                                                  self.sparse_shape,
                                                  batch_size)
        x = self.conv_input(input_sp_tensor)

        encode_features = []
        for encoder_layer in self.encoder_layers:
            x = encoder_layer(x)
            encode_features.append(x)

        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(encode_features[-1])
        spatial_features = out.dense()

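        # collapse the depth dim into the channel dim to form BEV features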
        N, C, D, H, W = spatial_features.shape
        spatial_features = spatial_features.view(N, C * D, H, W)

        ret = {'spatial_features': spatial_features}

        # for segmentation head, with output shape:
        # [400, 352, 11] <- [200, 176, 5]
        # [800, 704, 21] <- [400, 352, 11]
        # [1600, 1408, 41] <- [800, 704, 21]
        # [1600, 1408, 41] <- [1600, 1408, 41]
        decode_features = []
        x = encode_features[-1]
        for i in range(self.stage_num, 0, -1):
            x = self.decoder_layer_forward(
                encode_features[i - 1],
                x,
                getattr(self, f'lateral_layer{i}'),
                getattr(self, f'merge_layer{i}'),
                getattr(self, f'upsample_layer{i}'),
            )
            decode_features.append(x)

        seg_features = decode_features[-1].features

        ret.update({'seg_features': seg_features})

        return ret

    def decoder_layer_forward(self, x_lateral, x_bottom, lateral_layer,
                              merge_layer, upsample_layer):
        """Forward of upsample and residual block.

        Args:
            x_lateral (SparseConvTensor): lateral tensor
            x_bottom (SparseConvTensor): tensor from bottom layer
            lateral_layer (SparseBasicBlock): convolution for lateral tensor
            merge_layer (SparseSequential): convolution for merging features
            upsample_layer (SparseSequential): convolution for upsampling

        Returns:
            SparseConvTensor: upsampled feature
        """
        x_trans = lateral_layer(x_lateral)
        x = x_trans
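        # concatenate bottom-up features with the lateral features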
        x.features = torch.cat((x_bottom.features, x_trans.features), dim=1)
        x_m = merge_layer(x)
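        # match the merged channel width before the residual addition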
        x = self.reduce_channel(x, x_m.features.shape[1])
        x.features = x_m.features + x.features
        x = upsample_layer(x)
        return x

    @staticmethod
    def reduce_channel(x, out_channels):
        """reduce channel for element-wise addition.
wuyuefeng's avatar
wuyuefeng committed
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216

        Args:
            x (SparseConvTensor): x.features (N, C1)
            out_channels (int): the number of channels after reduction

        Returns:
            SparseConvTensor: channel reduced feature
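
        Note:
            Channels are reduced by summing consecutive groups, e.g. a
            feature of shape (N, 64) is viewed as (N, 16, 4) and summed
            over the last dim to obtain (N, 16) when ``out_channels=16``.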
        """
        features = x.features
        n, in_channels = features.shape
        assert (in_channels %
                out_channels == 0) and (in_channels >= out_channels)

        x.features = features.view(n, out_channels, -1).sum(dim=2)
        return x

    def pre_act_block(self,
                      in_channels,
                      out_channels,
                      kernel_size,
                      indice_key=None,
                      stride=1,
                      padding=0,
                      conv_type='subm',
                      norm_cfg=None):
        """Make pre activate sparse convolution block.

        Args:
            in_channels (int): the number of input channels
            out_channels (int): the number of output channels
            kernel_size (int): kernel size of convolution
            indice_key (str): the indice key used for sparse tensor
            stride (int): the stride of convolution
            padding (int or list[int]): padding of the convolution
            conv_type (str): conv type in 'subm', 'spconv' or 'inverseconv'
            norm_cfg (dict): config of normalization layer

        Returns:
            spconv.SparseSequential: pre-activation sparse convolution block.
        """
        # TODO: use ConvModule to encapsulate
        assert conv_type in ['subm', 'spconv', 'inverseconv']

        if conv_type == 'subm':
            m = spconv.SparseSequential(
                build_norm_layer(norm_cfg, in_channels)[1],
                nn.ReLU(inplace=True),
                spconv.SubMConv3d(
                    in_channels,
                    out_channels,
                    kernel_size,
                    padding=padding,
                    bias=False,
                    indice_key=indice_key),
            )
        elif conv_type == 'spconv':
            m = spconv.SparseSequential(
                build_norm_layer(norm_cfg, in_channels)[1],
                nn.ReLU(inplace=True),
                spconv.SparseConv3d(
                    in_channels,
                    out_channels,
                    kernel_size,
                    stride=stride,
                    padding=padding,
                    bias=False,
                    indice_key=indice_key),
            )
        elif conv_type == 'inverseconv':
            m = spconv.SparseSequential(
                build_norm_layer(norm_cfg, in_channels)[1],
                nn.ReLU(inplace=True),
                spconv.SparseInverseConv3d(
                    in_channels,
                    out_channels,
                    kernel_size,
                    bias=False,
                    indice_key=indice_key),
            )
        else:
            raise NotImplementedError
        return m

    def post_act_block(self,
                       in_channels,
                       out_channels,
                       kernel_size,
                       indice_key,
                       stride=1,
                       padding=0,
                       conv_type='subm',
                       norm_cfg=None):
        """Make post activate sparse convolution block.

        Args:
            in_channels (int): the number of input channels
            out_channels (int): the number of output channels
            kernel_size (int): kernel size of convolution
            indice_key (str): the indice key used for sparse tensor
            stride (int): the stride of convolution
            padding (int or list[int]): padding of the convolution
            conv_type (str): conv type in 'subm', 'spconv' or 'inverseconv'
            norm_cfg (dict[str]): config of normalization layer

        Returns:
            spconv.SparseSequential: post-activation sparse convolution block.
        """
        # TODO: use ConvModule to encapsulate
        assert conv_type in ['subm', 'spconv', 'inverseconv']

        if conv_type == 'subm':
            m = spconv.SparseSequential(
                spconv.SubMConv3d(
                    in_channels,
                    out_channels,
                    kernel_size,
                    bias=False,
                    indice_key=indice_key),
                build_norm_layer(norm_cfg, out_channels)[1],
                nn.ReLU(inplace=True),
            )
        elif conv_type == 'spconv':
            m = spconv.SparseSequential(
                spconv.SparseConv3d(
                    in_channels,
                    out_channels,
                    kernel_size,
                    stride=stride,
                    padding=padding,
                    bias=False,
                    indice_key=indice_key),
                build_norm_layer(norm_cfg, out_channels)[1],
                nn.ReLU(inplace=True),
            )
        elif conv_type == 'inverseconv':
            m = spconv.SparseSequential(
                spconv.SparseInverseConv3d(
                    in_channels,
                    out_channels,
                    kernel_size,
                    bias=False,
                    indice_key=indice_key),
                build_norm_layer(norm_cfg, out_channels)[1],
                nn.ReLU(inplace=True),
            )
        else:
            raise NotImplementedError
        return m

    def make_encoder_layers(self, make_block, norm_cfg, in_channels):
        """make encoder layers using sparse convs
wuyuefeng's avatar
wuyuefeng committed
333
334
335

        Args:
            make_block (method): a bound method used to build blocks
            norm_cfg (dict[str]): config of normalization layer
            in_channels (int): the number of encoder input channels

        Returns:
            int: the number of encoder output channels
        """
        self.encoder_layers = spconv.SparseSequential()
        for i, blocks in enumerate(self.encoder_channels):
            blocks_list = []
            for j, out_channels in enumerate(tuple(blocks)):
                padding = tuple(self.encoder_paddings[i])[j]
                # each stage starts with a spconv layer, except the first stage
                if i != 0 and j == 0:
                    blocks_list.append(
                        make_block(
                            in_channels,
                            out_channels,
                            3,
                            norm_cfg=norm_cfg,
                            stride=2,
                            padding=padding,
                            indice_key=f'spconv{i + 1}',
                            conv_type='spconv'))
                else:
                    blocks_list.append(
                        make_block(
                            in_channels,
                            out_channels,
                            3,
                            norm_cfg=norm_cfg,
                            padding=padding,
                            indice_key=f'subm{i + 1}'))
                in_channels = out_channels
            stage_name = f'encoder_layer{i + 1}'
            stage_layers = spconv.SparseSequential(*blocks_list)
            self.encoder_layers.add_module(stage_name, stage_layers)
        return out_channels

    def make_decoder_layers(self, make_block, norm_cfg, in_channels):
        """make decoder layers using sparse convs
wuyuefeng's avatar
wuyuefeng committed
377
378
379

        Args:
            make_block (method): a bound method used to build blocks
            norm_cfg (dict[str]): config of normalization layer
            in_channels (int): the number of decoder input channels
        """
        block_num = len(self.decoder_channels)
        for i, block_channels in enumerate(self.decoder_channels):
            paddings = self.decoder_paddings[i]
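            # each decoder stage has a lateral, a merge and an upsample layer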
            setattr(
                self, f'lateral_layer{block_num - i}',
                SparseBasicBlock(
                    in_channels,
                    block_channels[0],
                    conv_cfg=dict(
                        type='SubMConv3d', indice_key=f'subm{block_num - i}'),
                    norm_cfg=norm_cfg))
            setattr(
                self, f'merge_layer{block_num - i}',
                make_block(
                    in_channels * 2,
                    block_channels[1],
                    3,
                    norm_cfg=norm_cfg,
                    padding=paddings[0],
                    indice_key=f'subm{block_num - i}'))
            if block_num - i != 1:
                setattr(
                    self, f'upsample_layer{block_num - i}',
                    make_block(
                        in_channels,
                        block_channels[2],
                        3,
                        norm_cfg=norm_cfg,
                        padding=paddings[1],
                        indice_key=f'spconv{block_num - i}',
                        conv_type='inverseconv'))
            else:
                # use submanifold conv instead of inverse conv
                # in the last block
                setattr(
                    self, f'upsample_layer{block_num - i}',
                    make_block(
                        in_channels,
                        block_channels[2],
                        3,
                        norm_cfg=norm_cfg,
                        padding=paddings[1],
                        indice_key='subm1',
                        conv_type='subm'))
            in_channels = block_channels[2]