import torch
from mmcv.cnn import build_norm_layer
from torch import nn
from torch.nn import functional as F


def get_paddings_indicator(actual_num, max_num, axis=0):
    """Create boolean mask by actually number of a padded tensor.

    Args:
        actual_num (torch.Tensor): Actual number of points in each voxel.
        max_num (int): Max number of points in each voxel.
        axis (int, optional): Axis along which the comparison is expanded.
            Defaults to 0.

    Returns:
        torch.Tensor: Mask indicating which points are valid in each voxel.
    """
    actual_num = torch.unsqueeze(actual_num, axis + 1)
    # actual_num shape: (N, 1) when axis == 0
    max_num_shape = [1] * len(actual_num.shape)
    max_num_shape[axis + 1] = -1
    max_num = torch.arange(
        max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape)
    # broadcast comparison, e.g. actual_num [[3], [4], [2]] against
    # max_num [[0, 1, 2, 3, 4]] marks the first `actual_num` entries
    # of each row as valid
    paddings_indicator = actual_num.int() > max_num
    # paddings_indicator shape: (N, max_num)
    return paddings_indicator
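
# A minimal usage sketch of get_paddings_indicator (illustrative, not part
# of the original module): for three voxels holding 3, 4 and 2 real points
# padded to max_num=5,
#
#     mask = get_paddings_indicator(torch.tensor([3, 4, 2]), 5)
#
# yields
#
#     [[True, True, True, False, False],
#      [True, True, True, True,  False],
#      [True, True, False, False, False]]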


class VFELayer(nn.Module):
    """Voxel Feature Encoder layer.

    The voxel encoder is composed of a series of these layers.
    This module does not support average pooling; only max pooling is
    used to aggregate features inside a VFE.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        norm_cfg (dict): Config dict of normalization layers.
        max_out (bool): Whether to aggregate the features of points inside
            each voxel and only return voxel features.
        cat_max (bool): Whether to concatenate the aggregated features
            and pointwise features.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
                 max_out=True,
                 cat_max=True):
        super(VFELayer, self).__init__()
        self.cat_max = cat_max
        self.max_out = max_out
        # self.units = int(out_channels / 2)

        self.norm = build_norm_layer(norm_cfg, out_channels)[1]
        self.linear = nn.Linear(in_channels, out_channels, bias=False)

    def forward(self, inputs):
        """Forward function.

        Args:
            inputs (torch.Tensor): Voxel features of shape (N, M, C).
                N is the number of voxels, M is the max number of points
                per voxel and C is the number of channels of the point
                features.

        Returns:
            torch.Tensor: Voxel features. There are three modes under which
                the features have different meanings.
                - `max_out=False`: Return pointwise features of
                    shape (N, M, C).
                - `max_out=True` and `cat_max=False`: Return aggregated
                    voxel features of shape (N, C).
                - `max_out=True` and `cat_max=True`: Return pointwise
                    features concatenated with the repeated aggregated
                    features, of shape (N, M, 2 * C).
        """
        # (N, M, C) -> linear -> (N, M, out_channels)
        voxel_count = inputs.shape[1]
        x = self.linear(inputs)
        # BN1d normalizes the channel dim, so permute to (N, C, M) and back
        x = self.norm(x.permute(0, 2, 1).contiguous()).permute(0, 2,
                                                               1).contiguous()
        pointwise = F.relu(x)
        # pointwise: (N, M, out_channels)
        if self.max_out:
            # max over the point dimension: (N, 1, out_channels)
            aggregated = torch.max(pointwise, dim=1, keepdim=True)[0]
        else:
            # return pointwise features for the fusion layer
            return pointwise

        if not self.cat_max:
            return aggregated.squeeze(1)
        else:
            # tile the aggregated features back to every point
            repeated = aggregated.repeat(1, voxel_count, 1)
            concatenated = torch.cat([pointwise, repeated], dim=2)
            # concatenated: (N, M, 2 * out_channels)
            return concatenated
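
# Shape sketch for VFELayer (illustrative, assuming made-up channel sizes):
# with N=2 voxels, M=5 points and a 10 -> 64 channel mapping,
#
#     layer = VFELayer(in_channels=10, out_channels=64)
#     out = layer(torch.rand(2, 5, 10))
#
# the default max_out=True, cat_max=True path returns out of shape
# (2, 5, 128): pointwise (2, 5, 64) concatenated with the repeated
# per-voxel max (2, 5, 64). With cat_max=False it would be (2, 64).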


class PFNLayer(nn.Module):
    """Pillar Feature Net Layer.

    The Pillar Feature Net is composed of a series of these layers, but the
    PointPillars paper results only used a single PFNLayer.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        norm_cfg (dict): Config dict of normalization layers.
        last_layer (bool): If True, there is no concatenation of
            pointwise and aggregated features. Defaults to False.
        mode (str): Pooling mode used to gather features inside voxels,
            either 'max' or 'avg'. Defaults to 'max'.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
                 last_layer=False,
                 mode='max'):

        super().__init__()
        self.name = 'PFNLayer'
        self.last_vfe = last_layer
        if not self.last_vfe:
            out_channels = out_channels // 2
        self.units = out_channels

        self.norm = build_norm_layer(norm_cfg, self.units)[1]
        self.linear = nn.Linear(in_channels, self.units, bias=False)

        assert mode in ['max', 'avg']
        self.mode = mode

    def forward(self, inputs, num_voxels=None, aligned_distance=None):
        """Forward function.

        Args:
            inputs (torch.Tensor): Pillar/Voxel inputs with shape (N, M, C).
                N is the number of voxels, M is the max number of points
                per voxel and C is the number of channels of the point
                features.
            num_voxels (torch.Tensor, optional): Number of points in each
                voxel. Required when `mode='avg'`. Defaults to None.
            aligned_distance (torch.Tensor, optional): The distance of
                each point to the voxel center. Defaults to None.

        Returns:
            torch.Tensor: Features of pillars.
        """
        x = self.linear(inputs)
        # BN1d normalizes the channel dim, so permute to (N, C, M) and back
        x = self.norm(x.permute(0, 2, 1).contiguous()).permute(0, 2,
                                                               1).contiguous()
        x = F.relu(x)

        if self.mode == 'max':
            if aligned_distance is not None:
                # weight each point by its distance to the voxel center
                x = x.mul(aligned_distance.unsqueeze(-1))
            x_max = torch.max(x, dim=1, keepdim=True)[0]
        elif self.mode == 'avg':
            if aligned_distance is not None:
                x = x.mul(aligned_distance.unsqueeze(-1))
            # sum over points and divide by the real point count per voxel
            x_max = x.sum(
                dim=1, keepdim=True) / num_voxels.type_as(inputs).view(
                    -1, 1, 1)

        if self.last_vfe:
            return x_max
        else:
            x_repeat = x_max.repeat(1, inputs.shape[1], 1)
            x_concatenated = torch.cat([x, x_repeat], dim=2)
            return x_concatenated
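

# Shape sketch for PFNLayer (illustrative, assuming made-up channel sizes):
# a non-last layer halves out_channels internally and concatenates, so
#
#     layer = PFNLayer(in_channels=10, out_channels=64, last_layer=False)
#     out = layer(torch.rand(2, 5, 10))
#
# returns out of shape (2, 5, 64): pointwise (2, 5, 32) concatenated with
# the repeated per-pillar max (2, 5, 32). With last_layer=True the linear
# maps to the full 64 channels and the output is the (2, 1, 64) max.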