# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import build_norm_layer
from mmcv.runner import auto_fp16
from torch import nn
from torch.nn import functional as F


def get_paddings_indicator(actual_num, max_num, axis=0):
    """Create a boolean mask marking the valid entries of a padded tensor.

    Args:
        actual_num (torch.Tensor): Actual number of points in each voxel.
        max_num (int): Max number of points in each voxel.
        axis (int, optional): Axis along which the voxels are listed.
            Defaults to 0.

    Returns:
        torch.Tensor: Mask indicating which points are valid inside a voxel.
    """
    # Insert a singleton dim so the counts broadcast against the index range.
    actual_num = torch.unsqueeze(actual_num, axis + 1)
    # Build a view shape with -1 at the comparison axis and 1 elsewhere.
    view_shape = [1] * len(actual_num.shape)
    view_shape[axis + 1] = -1
    index_range = torch.arange(
        max_num, dtype=torch.int,
        device=actual_num.device).view(view_shape)
    # Broadcast compare, e.g. counts [[3],[4],[2]] vs indices [[0,1,2,3,4]]
    # yields True exactly for indices below each voxel's actual count.
    return actual_num.int() > index_range


class VFELayer(nn.Module):
    """Voxel Feature Encoder layer.

    The voxel encoder is composed of a series of these layers.
    This module does not support average pooling and only supports
    max pooling to gather features inside a VFE.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        norm_cfg (dict): Config dict of normalization layers.
        max_out (bool): Whether aggregate the features of points inside
            each voxel and only return voxel features.
        cat_max (bool): Whether concatenate the aggregated features
            and pointwise features.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
                 max_out=True,
                 cat_max=True):
        super(VFELayer, self).__init__()
        self.fp16_enabled = False
        self.cat_max = cat_max
        self.max_out = max_out

        self.norm = build_norm_layer(norm_cfg, out_channels)[1]
        self.linear = nn.Linear(in_channels, out_channels, bias=False)

    # NOTE: ``apply_to`` must be a tuple; ``('inputs')`` is just the string
    # ``'inputs'`` (missing trailing comma) and only worked through mmcv's
    # substring containment check by accident.
    @auto_fp16(apply_to=('inputs', ), out_fp32=True)
    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (torch.Tensor): Voxels features of shape (N, M, C).
                N is the number of voxels, M is the number of points in
                voxels, C is the number of channels of point features.

        Returns:
            torch.Tensor: Voxel features. There are three modes under which
                the features have different meaning.
                - `max_out=False`: Return point-wise features in
                    shape (N, M, C).
                - `max_out=True` and `cat_max=False`: Return aggregated
                    voxel features in shape (N, C).
                - `max_out=True` and `cat_max=True`: Return concatenated
                    point-wise and aggregated features in shape (N, M, 2C).
        """
        # [K, T, 7] tensordot [7, units] = [K, T, units]
        voxel_count = inputs.shape[1]

        x = self.linear(inputs)
        # BN1d expects (N, C, L): move channels to dim 1, normalize, move back.
        x = self.norm(x.permute(0, 2, 1).contiguous()).permute(0, 2,
                                                               1).contiguous()
        pointwise = F.relu(x)
        # [K, T, units]
        if self.max_out:
            aggregated = torch.max(pointwise, dim=1, keepdim=True)[0]
        else:
            # this is for fusion layer
            return pointwise

        if not self.cat_max:
            return aggregated.squeeze(1)
        else:
            # [K, 1, units] -> broadcast the pooled feature back to every
            # point in the voxel, then concatenate along channels.
            repeated = aggregated.repeat(1, voxel_count, 1)
            concatenated = torch.cat([pointwise, repeated], dim=2)
            # [K, T, 2 * units]
            return concatenated


class PFNLayer(nn.Module):
    """Pillar Feature Net Layer.

    The Pillar Feature Net is composed of a series of these layers, but the
    PointPillars paper results only used a single PFNLayer.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        norm_cfg (dict, optional): Config dict of normalization layers.
            Defaults to dict(type='BN1d', eps=1e-3, momentum=0.01).
        last_layer (bool, optional): If last_layer, there is no
            concatenation of features. Defaults to False.
        mode (str, optional): Pooling model to gather features inside voxels.
            Defaults to 'max'.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
                 last_layer=False,
                 mode='max'):

        super().__init__()
        self.fp16_enabled = False
        self.name = 'PFNLayer'
        self.last_vfe = last_layer
        # Intermediate layers keep half the channels point-wise; the other
        # half comes from concatenating the pooled voxel feature in forward().
        if not self.last_vfe:
            out_channels = out_channels // 2
        self.units = out_channels

        self.norm = build_norm_layer(norm_cfg, self.units)[1]
        self.linear = nn.Linear(in_channels, self.units, bias=False)

        assert mode in ['max', 'avg']
        self.mode = mode

    # NOTE: ``apply_to`` must be a tuple; ``('inputs')`` is just the string
    # ``'inputs'`` (missing trailing comma) and only worked through mmcv's
    # substring containment check by accident.
    @auto_fp16(apply_to=('inputs', ), out_fp32=True)
    def forward(self, inputs, num_voxels=None, aligned_distance=None):
        """Forward function.

        Args:
            inputs (torch.Tensor): Pillar/Voxel inputs with shape (N, M, C).
                N is the number of voxels, M is the number of points in
                voxels, C is the number of channels of point features.
            num_voxels (torch.Tensor, optional): Number of points in each
                voxel. Required when ``mode == 'avg'``. Defaults to None.
            aligned_distance (torch.Tensor, optional): The distance of
                each points to the voxel center. Defaults to None.

        Returns:
            torch.Tensor: Features of Pillars.
        """
        x = self.linear(inputs)
        # BN1d expects (N, C, L): move channels to dim 1, normalize, move back.
        x = self.norm(x.permute(0, 2, 1).contiguous()).permute(0, 2,
                                                               1).contiguous()
        x = F.relu(x)

        if self.mode == 'max':
            if aligned_distance is not None:
                # Weight each point by its distance to the voxel center.
                x = x.mul(aligned_distance.unsqueeze(-1))
            x_max = torch.max(x, dim=1, keepdim=True)[0]
        elif self.mode == 'avg':
            if aligned_distance is not None:
                x = x.mul(aligned_distance.unsqueeze(-1))
            # Average over the actual point count per voxel, not the padded M.
            x_max = x.sum(
                dim=1, keepdim=True) / num_voxels.type_as(inputs).view(
                    -1, 1, 1)

        if self.last_vfe:
            return x_max
        else:
            # Broadcast the pooled feature back to every point and
            # concatenate with point-wise features: [N, M, 2 * units].
            x_repeat = x_max.repeat(1, inputs.shape[1], 1)
            x_concatenated = torch.cat([x, x_repeat], dim=2)
            return x_concatenated