# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from typing import Optional, Sequence, Tuple

from mmcv.cnn import build_conv_layer, build_norm_layer
from mmengine.model import BaseModule
from torch import Tensor
from torch import nn as nn

from mmdet3d.registry import MODELS
from mmdet3d.utils import ConfigType, OptMultiConfig


@MODELS.register_module()
class SECOND(BaseModule):
    """Backbone network for SECOND/PointPillars/PartA2/MVXNet.

    Args:
        in_channels (int): Input channels.
        out_channels (list[int]): Output channels for multi-scale feature maps.
        layer_nums (list[int]): Number of layers in each stage.
        layer_strides (list[int]): Strides of each stage.
        norm_cfg (dict): Config dict of normalization layers.
        conv_cfg (dict): Config dict of convolutional layers.
        init_cfg (dict or list[dict], optional): Initialization config dict.
            Defaults to None.
        pretrained (str, optional): Deprecated; use ``init_cfg`` with
            ``type='Pretrained'`` instead. Defaults to None.
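
    Example:
        A minimal config sketch; the values below are simply this class's
        defaults and ``backbone_cfg`` is an illustrative name, not taken
        from any existing config file:

        >>> backbone_cfg = dict(
        ...     type='SECOND',
        ...     in_channels=128,
        ...     out_channels=[128, 128, 256],
        ...     layer_nums=[3, 5, 5],
        ...     layer_strides=[2, 2, 2])
        >>> backbone = MODELS.build(backbone_cfg)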
    """

    def __init__(self,
                 in_channels: int = 128,
                 out_channels: Sequence[int] = [128, 128, 256],
                 layer_nums: Sequence[int] = [3, 5, 5],
                 layer_strides: Sequence[int] = [2, 2, 2],
                 norm_cfg: ConfigType = dict(
                     type='BN', eps=1e-3, momentum=0.01),
                 conv_cfg: ConfigType = dict(type='Conv2d', bias=False),
                 init_cfg: OptMultiConfig = None,
                 pretrained: Optional[str] = None) -> None:
        super(SECOND, self).__init__(init_cfg=init_cfg)
        assert len(layer_strides) == len(layer_nums)
        assert len(out_channels) == len(layer_nums)

        in_filters = [in_channels, *out_channels[:-1]]
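        # Each stage below begins with one (possibly strided) 3x3
        # conv-norm-ReLU and is followed by layer_nums[i] additional
        # stride-1 conv-norm-ReLU triples, producing one feature map
        # per stage.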
        # Note that when stride > 1, conv2d with 'same' padding is not
        # equivalent to pad-conv2d, so pad-conv2d should be used here.
        blocks = []
        for i, layer_num in enumerate(layer_nums):
            block = [
                build_conv_layer(
                    conv_cfg,
                    in_filters[i],
                    out_channels[i],
                    3,
                    stride=layer_strides[i],
                    padding=1),
                build_norm_layer(norm_cfg, out_channels[i])[1],
                nn.ReLU(inplace=True),
            ]
            for j in range(layer_num):
                block.append(
                    build_conv_layer(
                        conv_cfg,
                        out_channels[i],
                        out_channels[i],
                        3,
                        padding=1))
                block.append(build_norm_layer(norm_cfg, out_channels[i])[1])
                block.append(nn.ReLU(inplace=True))

            block = nn.Sequential(*block)
            blocks.append(block)

        self.blocks = nn.ModuleList(blocks)

        assert not (init_cfg and pretrained), \
            'init_cfg and pretrained cannot be set at the same time'
        if isinstance(pretrained, str):
            warnings.warn('DeprecationWarning: pretrained is deprecated, '
                          'please use "init_cfg" instead')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        else:
            self.init_cfg = dict(type='Kaiming', layer='Conv2d')

    def forward(self, x: Tensor) -> Tuple[Tensor, ...]:
        """Forward function.

        Args:
            x (torch.Tensor): Input with shape (N, C, H, W).

        Returns:
            tuple[torch.Tensor]: Multi-scale features.
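
        Example:
            A shape-only sketch assuming the default configuration; the
            input spatial size (64, 64) is arbitrary:

            >>> import torch
            >>> self = SECOND(in_channels=128)
            >>> outs = self.forward(torch.rand(1, 128, 64, 64))
            >>> [tuple(out.shape) for out in outs]
            [(1, 128, 32, 32), (1, 128, 16, 16), (1, 256, 8, 8)]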
        """
        outs = []
        for i in range(len(self.blocks)):
            x = self.blocks[i](x)
            outs.append(x)
        return tuple(outs)