# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner import auto_fp16
from torch import nn as nn

from mmdet3d.models.layers import PointFPModule, build_sa_module
from mmdet3d.registry import MODELS
from .base_pointnet import BasePointNet


@MODELS.register_module()
class PointNet2SASSG(BasePointNet):
    """PointNet2 with Single-scale grouping.

    Args:
        in_channels (int): Input channels of point cloud.
        num_points (tuple[int]): The number of points which each SA
            module samples.
        radius (tuple[float]): Sampling radii of each SA module.
        num_samples (tuple[int]): The number of samples for ball
            query in each SA module.
        sa_channels (tuple[tuple[int]]): Out channels of each mlp in SA module.
        fp_channels (tuple[tuple[int]]): Out channels of each mlp in FP module.
        norm_cfg (dict): Config of normalization layer.
        sa_cfg (dict): Config of set abstraction module, which may contain
            the following keys and values:

            - pool_mod (str): Pool method ('max' or 'avg') for SA modules.
            - use_xyz (bool): Whether to use xyz as a part of features.
            - normalize_xyz (bool): Whether to normalize xyz with radii in
              each SA module.
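
    Example:
        An illustrative sketch assuming the default arguments above; with
        those defaults the four SA stages subsample the input cloud to
        2048, 1024, 512 and 256 points in turn, and two FP stages
        propagate features back up.

        >>> backbone = PointNet2SASSG(in_channels=4)
        >>> backbone.num_sa, backbone.num_fp
        (4, 2)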
    """

    def __init__(self,
                 in_channels,
                 num_points=(2048, 1024, 512, 256),
                 radius=(0.2, 0.4, 0.8, 1.2),
                 num_samples=(64, 32, 16, 16),
                 sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256),
                              (128, 128, 256)),
                 fp_channels=((256, 256), (256, 256)),
                 norm_cfg=dict(type='BN2d'),
                 sa_cfg=dict(
                     type='PointSAModule',
                     pool_mod='max',
                     use_xyz=True,
                     normalize_xyz=True),
                 init_cfg=None):
        super().__init__(init_cfg=init_cfg)
        self.num_sa = len(sa_channels)
        self.num_fp = len(fp_channels)

        assert len(num_points) == len(radius) == len(num_samples) == len(
            sa_channels)
        assert len(sa_channels) >= len(fp_channels)

        self.SA_modules = nn.ModuleList()
        sa_in_channel = in_channels - 3  # number of channels without xyz
        skip_channel_list = [sa_in_channel]

        for sa_index in range(self.num_sa):
            cur_sa_mlps = list(sa_channels[sa_index])
            cur_sa_mlps = [sa_in_channel] + cur_sa_mlps
            sa_out_channel = cur_sa_mlps[-1]

            self.SA_modules.append(
                build_sa_module(
                    num_point=num_points[sa_index],
                    radius=radius[sa_index],
                    num_sample=num_samples[sa_index],
                    mlp_channels=cur_sa_mlps,
                    norm_cfg=norm_cfg,
                    cfg=sa_cfg))
            skip_channel_list.append(sa_out_channel)
            sa_in_channel = sa_out_channel

        self.FP_modules = nn.ModuleList()

        fp_source_channel = skip_channel_list.pop()
        fp_target_channel = skip_channel_list.pop()
        for fp_index in range(len(fp_channels)):
            cur_fp_mlps = list(fp_channels[fp_index])
            cur_fp_mlps = [fp_source_channel + fp_target_channel] + cur_fp_mlps
            self.FP_modules.append(PointFPModule(mlp_channels=cur_fp_mlps))
            if fp_index != len(fp_channels) - 1:
                fp_source_channel = cur_fp_mlps[-1]
                fp_target_channel = skip_channel_list.pop()

    @auto_fp16(apply_to=('points', ))
    def forward(self, points):
        """Forward pass.

        Args:
            points (torch.Tensor): point coordinates with features,
                with shape (B, N, 3 + input_feature_dim).

        Returns:
            dict[str, list[torch.Tensor]]: Outputs after SA and FP modules.

                - fp_xyz (list[torch.Tensor]): The coordinates of
                    each FP feature level.
                - fp_features (list[torch.Tensor]): The features
                    from each Feature Propagation layer.
                - fp_indices (list[torch.Tensor]): Indices of the
                    input points for each FP feature level.
                - sa_xyz, sa_features, sa_indices (list[torch.Tensor]):
                    The corresponding intermediate outputs of the SA
                    stages, including the raw input, are returned as well.
        """
        xyz, features = self._split_point_feats(points)

        batch, num_points = xyz.shape[:2]
        indices = xyz.new_tensor(range(num_points)).unsqueeze(0).repeat(
            batch, 1).long()

        sa_xyz = [xyz]
        sa_features = [features]
        sa_indices = [indices]

        for i in range(self.num_sa):
            cur_xyz, cur_features, cur_indices = self.SA_modules[i](
                sa_xyz[i], sa_features[i])
            sa_xyz.append(cur_xyz)
            sa_features.append(cur_features)
            sa_indices.append(
                torch.gather(sa_indices[-1], 1, cur_indices.long()))

        fp_xyz = [sa_xyz[-1]]
        fp_features = [sa_features[-1]]
        fp_indices = [sa_indices[-1]]

        for i in range(self.num_fp):
            fp_features.append(self.FP_modules[i](
                sa_xyz[self.num_sa - i - 1], sa_xyz[self.num_sa - i],
                sa_features[self.num_sa - i - 1], fp_features[-1]))
            fp_xyz.append(sa_xyz[self.num_sa - i - 1])
            fp_indices.append(sa_indices[self.num_sa - i - 1])

        ret = dict(
            fp_xyz=fp_xyz,
            fp_features=fp_features,
            fp_indices=fp_indices,
            sa_xyz=sa_xyz,
            sa_features=sa_features,
            sa_indices=sa_indices)
        return ret
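

# ---------------------------------------------------------------------------
# Minimal forward-pass sketch (illustrative only, not part of the module's
# public API). It assumes a CUDA-enabled build of the point sampling and
# grouping ops used by the SA modules, hence the .cuda() calls.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # Dummy cloud: batch of 2, 16384 points, xyz plus one feature channel.
    backbone = PointNet2SASSG(in_channels=4).cuda()
    points = torch.rand(2, 16384, 4).cuda()
    out = backbone(points)
    # `fp_features` holds the deepest SA output followed by one tensor per
    # FP stage; `fp_xyz` and `fp_indices` are aligned with it entry-wise.
    for key in ('fp_xyz', 'fp_features', 'fp_indices'):
        print(key, [tuple(t.shape) for t in out[key]])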