# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.ops import SparseConvTensor, SparseSequential
from mmcv.runner import auto_fp16
from torch import nn as nn

from mmdet3d.ops import SparseBasicBlock, make_sparse_convmodule
from ..builder import MIDDLE_ENCODERS


@MIDDLE_ENCODERS.register_module()
class SparseEncoder(nn.Module):
    r"""Sparse encoder for SECOND and Part-A2.

    Args:
        in_channels (int): The number of input channels.
        sparse_shape (list[int]): The sparse shape of input tensor.
        order (list[str], optional): Order of conv module.
            Defaults to ('conv', 'norm', 'act').
        norm_cfg (dict, optional): Config of normalization layer. Defaults to
            dict(type='BN1d', eps=1e-3, momentum=0.01).
        base_channels (int, optional): Out channels for conv_input layer.
            Defaults to 16.
        output_channels (int, optional): Out channels for conv_out layer.
            Defaults to 128.
        encoder_channels (tuple[tuple[int]], optional):
wangtai's avatar
wangtai committed
26
            Convolutional channels of each encode block.
27
28
        encoder_paddings (tuple[tuple[int]], optional):
            Paddings of each encode block.
29
            Defaults to ((16, ), (32, 32, 32), (64, 64, 64), (64, 64, 64)).
30
31
        block_type (str, optional): Type of the block to use.
            Defaults to 'conv_module'.
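
    Example:
        An illustrative sketch only; the channel count and sparse shape below
        are placeholder values from a typical point-cloud config, not defaults
        of this class.

        >>> encoder = SparseEncoder(
        ...     in_channels=4, sparse_shape=[41, 1600, 1408])
        >>> # forward(voxel_features, coors, batch_size) returns a dense
        >>> # BEV feature map of shape (N, C * D, H, W).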
    """

    def __init__(self,
                 in_channels,
                 sparse_shape,
                 order=('conv', 'norm', 'act'),
                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
                 base_channels=16,
                 output_channels=128,
                 encoder_channels=((16, ), (32, 32, 32), (64, 64, 64), (64, 64,
                                                                        64)),
                 encoder_paddings=((1, ), (1, 1, 1), (1, 1, 1), ((0, 1, 1), 1,
                                                                 1)),
                 block_type='conv_module'):
        super().__init__()
        assert block_type in ['conv_module', 'basicblock']
        self.sparse_shape = sparse_shape
        self.in_channels = in_channels
        self.order = order
        self.base_channels = base_channels
        self.output_channels = output_channels
        self.encoder_channels = encoder_channels
        self.encoder_paddings = encoder_paddings
        self.stage_num = len(self.encoder_channels)
        self.fp16_enabled = False
        # Spconv initializes all weights on its own

        assert isinstance(order, tuple) and len(order) == 3
        assert set(order) == {'conv', 'norm', 'act'}

        if self.order[0] != 'conv':  # pre activate
            self.conv_input = make_sparse_convmodule(
                in_channels,
                self.base_channels,
                3,
                norm_cfg=norm_cfg,
                padding=1,
                indice_key='subm1',
                conv_type='SubMConv3d',
                order=('conv', ))
        else:  # post activate
            self.conv_input = make_sparse_convmodule(
                in_channels,
                self.base_channels,
                3,
                norm_cfg=norm_cfg,
                padding=1,
                indice_key='subm1',
                conv_type='SubMConv3d')

        encoder_out_channels = self.make_encoder_layers(
            make_sparse_convmodule,
            norm_cfg,
            self.base_channels,
            block_type=block_type)

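        # conv_out compresses the remaining depth (z) dimension with a
        # (3, 1, 1) kernel and (2, 1, 1) stride; forward() then flattens
        # depth into channels to form the BEV feature map.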
        self.conv_out = make_sparse_convmodule(
            encoder_out_channels,
            self.output_channels,
            kernel_size=(3, 1, 1),
            stride=(2, 1, 1),
            norm_cfg=norm_cfg,
            padding=0,
            indice_key='spconv_down2',
            conv_type='SparseConv3d')

    @auto_fp16(apply_to=('voxel_features', ))
    def forward(self, voxel_features, coors, batch_size):
        """Forward of SparseEncoder.

        Args:
            voxel_features (torch.float32): Voxel features in shape (N, C).
            coors (torch.int32): Coordinates in shape (N, 4),
                the columns in the order of (batch_idx, z_idx, y_idx, x_idx).
            batch_size (int): Batch size.

        Returns:
            torch.Tensor: Backbone features in shape (N, C * D, H, W).
        """
        coors = coors.int()
        input_sp_tensor = SparseConvTensor(voxel_features, coors,
                                           self.sparse_shape, batch_size)
        x = self.conv_input(input_sp_tensor)

        encode_features = []
        for encoder_layer in self.encoder_layers:
            x = encoder_layer(x)
            encode_features.append(x)

        # for detection head
        # [200, 176, 5] -> [200, 176, 2]
        out = self.conv_out(encode_features[-1])
        spatial_features = out.dense()

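        # collapse the depth (D) dimension into channels to get a 2D BEV map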
        N, C, D, H, W = spatial_features.shape
        spatial_features = spatial_features.view(N, C * D, H, W)

        return spatial_features

    def make_encoder_layers(self,
                            make_block,
                            norm_cfg,
                            in_channels,
                            block_type='conv_module',
                            conv_cfg=dict(type='SubMConv3d')):
        """Make encoder layers using sparse convs.

        Args:
            make_block (method): A bound function to build blocks.
            norm_cfg (dict[str]): Config of normalization layer.
            in_channels (int): The number of encoder input channels.
            block_type (str, optional): Type of the block to use.
                Defaults to 'conv_module'.
            conv_cfg (dict, optional): Config of conv layer. Defaults to
                dict(type='SubMConv3d').

        Returns:
            int: The number of encoder output channels.
        """
        assert block_type in ['conv_module', 'basicblock']
        self.encoder_layers = SparseSequential()

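        # One stage is built per entry in self.encoder_channels. With
        # block_type='basicblock', the stride-2 downsampling SparseConv3d is
        # placed at the end of every stage except the last, rather than at
        # the stage start as in the 'conv_module' case below.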
        for i, blocks in enumerate(self.encoder_channels):
            blocks_list = []
            for j, out_channels in enumerate(tuple(blocks)):
                padding = tuple(self.encoder_paddings[i])[j]
                # each stage starts with a spconv layer
                # except the first stage
                if i != 0 and j == 0 and block_type == 'conv_module':
                    blocks_list.append(
                        make_block(
                            in_channels,
                            out_channels,
                            3,
                            norm_cfg=norm_cfg,
                            stride=2,
                            padding=padding,
                            indice_key=f'spconv{i + 1}',
                            conv_type='SparseConv3d'))
                elif block_type == 'basicblock':
                    if j == len(blocks) - 1 and i != len(
                            self.encoder_channels) - 1:
                        blocks_list.append(
                            make_block(
                                in_channels,
                                out_channels,
                                3,
                                norm_cfg=norm_cfg,
                                stride=2,
                                padding=padding,
                                indice_key=f'spconv{i + 1}',
                                conv_type='SparseConv3d'))
                    else:
                        blocks_list.append(
                            SparseBasicBlock(
                                out_channels,
                                out_channels,
                                norm_cfg=norm_cfg,
                                conv_cfg=conv_cfg))
                else:
                    blocks_list.append(
                        make_block(
                            in_channels,
                            out_channels,
                            3,
                            norm_cfg=norm_cfg,
                            padding=padding,
                            indice_key=f'subm{i + 1}',
                            conv_type='SubMConv3d'))
                in_channels = out_channels
            stage_name = f'encoder_layer{i + 1}'
            stage_layers = SparseSequential(*blocks_list)
            self.encoder_layers.add_module(stage_name, stage_layers)
        return out_channels
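

# Usage sketch (illustrative, not part of the module): since SparseEncoder is
# registered in MIDDLE_ENCODERS, it is normally built from a config dict; the
# values below are hypothetical and depend on the dataset/voxelization setup.
#
#   middle_encoder = dict(
#       type='SparseEncoder',
#       in_channels=4,
#       sparse_shape=[41, 1600, 1408],
#       order=('conv', 'norm', 'act'))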