"research/slim/nets/vgg.py" did not exist on "beeae0990a4c94c3df4fe8f1867a791d51bcf7a5"
utils.py 6.96 KB
Newer Older
dingchang's avatar
dingchang committed
1
# Copyright (c) OpenMMLab. All rights reserved.
2
3
from typing import Optional

zhangwenwei's avatar
zhangwenwei committed
4
import torch
5
from mmcv.cnn import build_norm_layer
6
from torch import Tensor, nn
zhangwenwei's avatar
zhangwenwei committed
7
8
9
from torch.nn import functional as F


10
11
12
def get_paddings_indicator(actual_num: Tensor,
                           max_num: int,
                           axis: int = 0) -> Tensor:
    """Create a boolean mask from the actual number of points in each
    padded voxel.

    Args:
        actual_num (torch.Tensor): Actual number of points in each voxel.
        max_num (int): Maximum number of points a voxel can hold (the
            padded size).
        axis (int): Axis along which the voxels are laid out in
            ``actual_num``. Defaults to 0.

    Returns:
        torch.Tensor: Boolean mask of shape ``actual_num.shape`` with an
        extra ``max_num``-sized dim inserted at ``axis + 1``; ``True``
        marks a valid point slot, ``False`` marks padding.
    """
    # Insert a singleton dim so actual_num broadcasts against the
    # per-slot index tensor below: e.g. [N] -> [N, 1] for axis=0.
    actual_num = torch.unsqueeze(actual_num, axis + 1)
    # Build an index tensor [0, 1, ..., max_num - 1] shaped to broadcast
    # along every axis except the newly inserted one.
    max_num_shape = [1] * len(actual_num.shape)
    max_num_shape[axis + 1] = -1
    max_num = torch.arange(
        max_num, dtype=torch.int, device=actual_num.device).view(max_num_shape)
    # Broadcast example for actual_num = [3, 4, 2], max_num = 5:
    # tiled_actual_num: [[3,3,3,3,3], [4,4,4,4,4], [2,2,2,2,2]]
    # tiled_max_num:    [[0,1,2,3,4], [0,1,2,3,4], [0,1,2,3,4]]
    # Slot i of a voxel is valid iff i < actual_num for that voxel.
    paddings_indicator = actual_num.int() > max_num
    # paddings_indicator shape: [batch_size, max_num]
    return paddings_indicator


class VFELayer(nn.Module):
    """Voxel Feature Encoder layer.

    The voxel encoder is composed of a series of these layers. Only max
    pooling is supported for aggregating the point features inside a
    voxel; average pooling is not available in this layer.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        norm_cfg (dict): Config dict of normalization layers.
        max_out (bool): Whether to aggregate the features of the points
            inside each voxel and return only voxel-wise features.
        cat_max (bool): Whether to concatenate the aggregated voxel
            features back onto the point-wise features.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 norm_cfg: Optional[dict] = dict(
                     type='BN1d', eps=1e-3, momentum=0.01),
                 max_out: Optional[bool] = True,
                 cat_max: Optional[bool] = True):
        super(VFELayer, self).__init__()
        self.cat_max = cat_max
        self.max_out = max_out
        # self.units = int(out_channels / 2)

        self.norm = build_norm_layer(norm_cfg, out_channels)[1]
        self.linear = nn.Linear(in_channels, out_channels, bias=False)

    def forward(self, inputs: Tensor) -> Tensor:
        """Forward pass.

        Args:
            inputs (torch.Tensor): Voxel features of shape (N, M, C),
                where N is the number of voxels, M the number of points
                per voxel and C the number of point-feature channels.

        Returns:
            torch.Tensor: Features whose meaning depends on the config:
                - ``max_out=False``: point-wise features of shape
                  (N, M, C').
                - ``max_out=True``, ``cat_max=False``: aggregated voxel
                  features of shape (N, C').
                - ``max_out=True``, ``cat_max=True``: point-wise features
                  concatenated with the broadcast voxel features, of
                  shape (N, M, 2 * C').
        """
        num_points = inputs.shape[1]
        # [K, T, 7] tensordot [7, units] = [K, T, units]
        projected = self.linear(inputs)
        # BatchNorm1d expects channels at dim 1, hence the permutes
        # around the normalization call.
        normed = self.norm(projected.permute(0, 2, 1).contiguous())
        pointwise = F.relu(normed.permute(0, 2, 1).contiguous())
        # [K, T, units]
        if not self.max_out:
            # this is for fusion layer
            return pointwise

        # [K, 1, units]
        aggregated = torch.max(pointwise, dim=1, keepdim=True)[0]
        if not self.cat_max:
            return aggregated.squeeze(1)

        # Broadcast the voxel feature back over its points and append it
        # to every point-wise feature vector.
        repeated = aggregated.repeat(1, num_points, 1)
        # [K, T, 2 * units]
        return torch.cat([pointwise, repeated], dim=2)


class PFNLayer(nn.Module):
    """Pillar Feature Net Layer.

    The Pillar Feature Net is composed of a series of these layers, but
    the PointPillars paper results only used a single PFNLayer.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        norm_cfg (dict, optional): Config dict of normalization layers.
            Defaults to dict(type='BN1d', eps=1e-3, momentum=0.01).
        last_layer (bool, optional): If last_layer, there is no
            concatenation of features. Defaults to False.
        mode (str, optional): Pooling mode ('max' or 'avg') used to
            gather features inside voxels. Defaults to 'max'.
    """

    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 norm_cfg: Optional[dict] = dict(
                     type='BN1d', eps=1e-3, momentum=0.01),
                 last_layer: Optional[bool] = False,
                 mode: Optional[str] = 'max'):
        super().__init__()
        assert mode in ['max', 'avg']
        self.name = 'PFNLayer'
        self.last_vfe = last_layer
        # Non-final layers emit half the channels point-wise; the other
        # half comes from the concatenated pooled features in forward().
        self.units = out_channels if self.last_vfe else out_channels // 2

        self.norm = build_norm_layer(norm_cfg, self.units)[1]
        self.linear = nn.Linear(in_channels, self.units, bias=False)

        self.mode = mode

    def forward(self,
                inputs: Tensor,
                num_voxels: Optional[Tensor] = None,
                aligned_distance: Optional[Tensor] = None) -> Tensor:
        """Forward pass.

        Args:
            inputs (torch.Tensor): Pillar/Voxel inputs with shape
                (N, M, C). N is the number of voxels, M is the number of
                points in voxels, C is the number of channels of point
                features.
            num_voxels (torch.Tensor, optional): Number of points in each
                voxel; required for 'avg' pooling. Defaults to None.
            aligned_distance (torch.Tensor, optional): The distance of
                each point to the voxel center, used as a per-point
                weight. Defaults to None.

        Returns:
            torch.Tensor: Features of pillars — pooled features of shape
            (N, 1, units) for the last layer, otherwise point-wise
            features concatenated with the pooled ones, shape
            (N, M, 2 * units).
        """
        projected = self.linear(inputs)
        # BatchNorm1d wants channels at dim 1, so swap dims around the
        # normalization and swap back.
        normed = self.norm(projected.permute(0, 2, 1).contiguous())
        activated = F.relu(normed.permute(0, 2, 1).contiguous())

        # Optionally weight each point by its distance to the voxel
        # center before pooling (identical handling for both modes).
        if aligned_distance is not None:
            activated = activated.mul(aligned_distance.unsqueeze(-1))

        if self.mode == 'max':
            pooled = torch.max(activated, dim=1, keepdim=True)[0]
        elif self.mode == 'avg':
            pooled = activated.sum(
                dim=1, keepdim=True) / num_voxels.type_as(inputs).view(
                    -1, 1, 1)

        if self.last_vfe:
            return pooled
        # Broadcast the pooled feature back over the points and append
        # it to every point-wise feature vector.
        tiled = pooled.repeat(1, inputs.shape[1], 1)
        return torch.cat([activated, tiled], dim=2)