"docs/source/api/vscode:/vscode.git/clone" did not exist on "56b5d0e53add8c436c90e94c12391e2efdf49490"
Unverified commit fa724b10, authored by Quantum Cat and committed by GitHub

Add type hint for middle_encoder and voxel_encoder (#2556)

* 2023/05/26 add type hint

* 2023/05/26 modify ugly typehint
parent 8e634dd1
 # Copyright (c) OpenMMLab. All rights reserved.
+from typing import List
 import torch
-from torch import nn
+from torch import Tensor, nn
 from mmdet3d.registry import MODELS

@@ -16,14 +18,17 @@ class PointPillarsScatter(nn.Module):
         output_shape (list[int]): Required output shape of features.
     """

-    def __init__(self, in_channels, output_shape):
+    def __init__(self, in_channels: int, output_shape: List[int]):
         super().__init__()
         self.output_shape = output_shape
         self.ny = output_shape[0]
         self.nx = output_shape[1]
         self.in_channels = in_channels

-    def forward(self, voxel_features, coors, batch_size=None):
+    def forward(self,
+                voxel_features: Tensor,
+                coors: Tensor,
+                batch_size: int = None) -> Tensor:
         """Forward function to scatter features."""
         # TODO: rewrite the function in a batch manner
         # no need to deal with different batch cases

@@ -32,7 +37,7 @@ class PointPillarsScatter(nn.Module):
         else:
             return self.forward_single(voxel_features, coors)

-    def forward_single(self, voxel_features, coors):
+    def forward_single(self, voxel_features: Tensor, coors: Tensor) -> Tensor:
         """Scatter features of single sample.

         Args:

@@ -56,7 +61,8 @@ class PointPillarsScatter(nn.Module):
         canvas = canvas.view(1, self.in_channels, self.ny, self.nx)
         return canvas

-    def forward_batch(self, voxel_features, coors, batch_size):
+    def forward_batch(self, voxel_features: Tensor, coors: Tensor,
+                      batch_size: int) -> Tensor:
         """Scatter features of single sample.

         Args:
......
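A side note on the new signature above: `batch_size: int = None` (and, later in this commit, `fusion_layer: dict = None` and `init_cfg: bool = None`) uses the implicit-Optional style that strict type-checker configurations flag, since the annotation does not admit the None default. A minimal sketch of the explicit spelling, shown purely as an illustration and not as part of the commit (the function name and body are placeholders):

from typing import Optional

from torch import Tensor


def scatter_forward(voxel_features: Tensor,
                    coors: Tensor,
                    batch_size: Optional[int] = None) -> Tensor:
    """Hypothetical stand-in for the forward above; only the annotation
    style (explicit Optional for a None default) is the point here."""
    raise NotImplementedError

Both spellings behave identically at runtime; the difference only matters to static analysis.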
 # Copyright (c) OpenMMLab. All rights reserved.
-from typing import List, Tuple
+from typing import Dict, List, Optional, Tuple, Union
 import torch
 from mmcv.ops import points_in_boxes_all, three_interpolate, three_nn

@@ -18,6 +18,8 @@ if IS_SPCONV2_AVAILABLE:
 else:
     from mmcv.ops import SparseConvTensor, SparseSequential
+
+TwoTupleIntType = Tuple[Tuple[int]]

 @MODELS.register_module()
 class SparseEncoder(nn.Module):

@@ -26,7 +28,7 @@ class SparseEncoder(nn.Module):
     Args:
         in_channels (int): The number of input channels.
         sparse_shape (list[int]): The sparse shape of input tensor.
-        order (list[str], optional): Order of conv module.
+        order (tuple[str], optional): Order of conv module.
             Defaults to ('conv', 'norm', 'act').
         norm_cfg (dict, optional): Config of normalization layer. Defaults to
             dict(type='BN1d', eps=1e-3, momentum=0.01).

@@ -46,19 +48,24 @@ class SparseEncoder(nn.Module):
             Default to False.
     """

-    def __init__(self,
-                 in_channels,
-                 sparse_shape,
-                 order=('conv', 'norm', 'act'),
-                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
-                 base_channels=16,
-                 output_channels=128,
-                 encoder_channels=((16, ), (32, 32, 32), (64, 64, 64), (64, 64,
-                                                                        64)),
-                 encoder_paddings=((1, ), (1, 1, 1), (1, 1, 1), ((0, 1, 1), 1,
-                                                                 1)),
-                 block_type='conv_module',
-                 return_middle_feats=False):
+    def __init__(
+            self,
+            in_channels: int,
+            sparse_shape: List[int],
+            order: Optional[Tuple[str]] = ('conv', 'norm', 'act'),
+            norm_cfg: Optional[dict] = dict(
+                type='BN1d', eps=1e-3, momentum=0.01),
+            base_channels: Optional[int] = 16,
+            output_channels: Optional[int] = 128,
+            encoder_channels: Optional[TwoTupleIntType] = ((16, ), (32, 32,
+                                                                    32),
+                                                           (64, 64,
+                                                            64), (64, 64, 64)),
+            encoder_paddings: Optional[TwoTupleIntType] = ((1, ), (1, 1, 1),
+                                                           (1, 1, 1),
+                                                           ((0, 1, 1), 1, 1)),
+            block_type: Optional[str] = 'conv_module',
+            return_middle_feats: Optional[bool] = False):
         super().__init__()
         assert block_type in ['conv_module', 'basicblock']
         self.sparse_shape = sparse_shape

@@ -112,7 +119,8 @@ class SparseEncoder(nn.Module):
             conv_type='SparseConv3d')

     @amp.autocast(enabled=False)
-    def forward(self, voxel_features, coors, batch_size):
+    def forward(self, voxel_features: Tensor, coors: Tensor,
+                batch_size: int) -> Union[Tensor, Tuple[Tensor, list]]:
         """Forward of SparseEncoder.

         Args:

@@ -154,12 +162,14 @@ class SparseEncoder(nn.Module):
         else:
             return spatial_features

-    def make_encoder_layers(self,
-                            make_block,
-                            norm_cfg,
-                            in_channels,
-                            block_type='conv_module',
-                            conv_cfg=dict(type='SubMConv3d')):
+    def make_encoder_layers(
+            self,
+            make_block: nn.Module,
+            norm_cfg: Dict,
+            in_channels: int,
+            block_type: Optional[str] = 'conv_module',
+            conv_cfg: Optional[dict] = dict(type='SubMConv3d')
+    ) -> int:
         """make encoder layers using sparse convs.

         Args:

@@ -256,16 +266,20 @@ class SparseEncoderSASSD(SparseEncoder):
             Defaults to 'conv_module'.
     """

-    def __init__(self,
-                 in_channels: int,
-                 sparse_shape: List[int],
-                 order: Tuple[str] = ('conv', 'norm', 'act'),
-                 norm_cfg: dict = dict(type='BN1d', eps=1e-3, momentum=0.01),
-                 base_channels: int = 16,
-                 output_channels: int = 128,
-                 encoder_channels: Tuple[tuple] = ((16, ), (32, 32, 32),
-                                                   (64, 64, 64), (64, 64, 64)),
-                 encoder_paddings: Tuple[tuple] = ((1, ), (1, 1, 1), (1, 1, 1),
-                                                   ((0, 1, 1), 1, 1)),
-                 block_type: str = 'conv_module'):
+    def __init__(
+            self,
+            in_channels: int,
+            sparse_shape: List[int],
+            order: Tuple[str] = ('conv', 'norm', 'act'),
+            norm_cfg: dict = dict(type='BN1d', eps=1e-3, momentum=0.01),
+            base_channels: int = 16,
+            output_channels: int = 128,
+            encoder_channels: Optional[TwoTupleIntType] = ((16, ), (32, 32,
+                                                                    32),
+                                                           (64, 64,
+                                                            64), (64, 64, 64)),
+            encoder_paddings: Optional[TwoTupleIntType] = ((1, ), (1, 1, 1),
+                                                           (1, 1, 1),
+                                                           ((0, 1, 1), 1, 1)),
+            block_type: str = 'conv_module'):
         super(SparseEncoderSASSD, self).__init__(
......
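One caveat about the `TwoTupleIntType = Tuple[Tuple[int]]` alias introduced above: under `typing` semantics this literally describes a 1-tuple whose only element is a 1-tuple holding a single int, whereas the defaults it annotates are variable-length tuples of variable-length tuples, and `encoder_paddings` additionally mixes ints with nested tuples. A sketch of more permissive spellings, given only to illustrate the `typing` rules (the alias names here are placeholders, not the commit's choice):

from typing import Tuple, Union

# Variadic form: any number of inner tuples, each holding any number of ints.
VariadicIntTuples = Tuple[Tuple[int, ...], ...]

# encoder_paddings nests tuples inside the inner tuples, so a fully precise
# hint needs a Union for the innermost elements.
PaddingsType = Tuple[Tuple[Union[int, Tuple[int, ...]], ...], ...]

encoder_channels: VariadicIntTuples = ((16, ), (32, 32, 32), (64, 64, 64),
                                       (64, 64, 64))
encoder_paddings: PaddingsType = ((1, ), (1, 1, 1), (1, 1, 1),
                                  ((0, 1, 1), 1, 1))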
 # Copyright (c) OpenMMLab. All rights reserved.
+from typing import Dict, List, Optional, Tuple
 import torch
+from torch import Tensor, nn
 from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE

@@ -14,6 +17,8 @@ from mmdet3d.models.layers import SparseBasicBlock, make_sparse_convmodule
 from mmdet3d.models.layers.sparse_block import replace_feature
 from mmdet3d.registry import MODELS
+
+TwoTupleIntType = Tuple[Tuple[int]]

 @MODELS.register_module()
 class SparseUNet(BaseModule):

@@ -35,21 +40,28 @@ class SparseUNet(BaseModule):
         decoder_paddings (tuple[tuple[int]]): Paddings of each decode block.
     """

-    def __init__(self,
-                 in_channels,
-                 sparse_shape,
-                 order=('conv', 'norm', 'act'),
-                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
-                 base_channels=16,
-                 output_channels=128,
-                 encoder_channels=((16, ), (32, 32, 32), (64, 64, 64), (64, 64,
-                                                                        64)),
-                 encoder_paddings=((1, ), (1, 1, 1), (1, 1, 1), ((0, 1, 1), 1,
-                                                                 1)),
-                 decoder_channels=((64, 64, 64), (64, 64, 32), (32, 32, 16),
-                                   (16, 16, 16)),
-                 decoder_paddings=((1, 0), (1, 0), (0, 0), (0, 1)),
-                 init_cfg=None):
+    def __init__(
+            self,
+            in_channels: int,
+            sparse_shape: List[int],
+            order: Tuple[str] = ('conv', 'norm', 'act'),
+            norm_cfg: dict = dict(type='BN1d', eps=1e-3, momentum=0.01),
+            base_channels: int = 16,
+            output_channels: int = 128,
+            encoder_channels: Optional[TwoTupleIntType] = ((16, ), (32, 32,
+                                                                    32),
+                                                           (64, 64,
+                                                            64), (64, 64, 64)),
+            encoder_paddings: Optional[TwoTupleIntType] = ((1, ), (1, 1, 1),
+                                                           (1, 1, 1),
+                                                           ((0, 1, 1), 1, 1)),
+            decoder_channels: Optional[TwoTupleIntType] = ((64, 64,
+                                                            64), (64, 64, 32),
+                                                           (32, 32,
+                                                            16), (16, 16, 16)),
+            decoder_paddings: Optional[TwoTupleIntType] = ((1, 0), (1, 0),
+                                                           (0, 0), (0, 1)),
+            init_cfg: bool = None):
         super().__init__(init_cfg=init_cfg)
         self.sparse_shape = sparse_shape
         self.in_channels = in_channels

@@ -101,7 +113,8 @@ class SparseUNet(BaseModule):
             indice_key='spconv_down2',
             conv_type='SparseConv3d')

-    def forward(self, voxel_features, coors, batch_size):
+    def forward(self, voxel_features: Tensor, coors: Tensor,
+                batch_size: int) -> Dict[str, Tensor]:
         """Forward of SparseUNet.

         Args:

@@ -152,8 +165,10 @@ class SparseUNet(BaseModule):
         return ret

-    def decoder_layer_forward(self, x_lateral, x_bottom, lateral_layer,
-                              merge_layer, upsample_layer):
+    def decoder_layer_forward(
+            self, x_lateral: SparseConvTensor, x_bottom: SparseConvTensor,
+            lateral_layer: SparseBasicBlock, merge_layer: SparseSequential,
+            upsample_layer: SparseSequential) -> SparseConvTensor:
         """Forward of upsample and residual block.

         Args:

@@ -176,7 +191,8 @@ class SparseUNet(BaseModule):
         return x

     @staticmethod
-    def reduce_channel(x, out_channels):
+    def reduce_channel(x: SparseConvTensor,
+                       out_channels: int) -> SparseConvTensor:
         """reduce channel for element-wise addition.

         Args:

@@ -194,7 +210,8 @@ class SparseUNet(BaseModule):
         x = replace_feature(x, features.view(n, out_channels, -1).sum(dim=2))
         return x

-    def make_encoder_layers(self, make_block, norm_cfg, in_channels):
+    def make_encoder_layers(self, make_block: nn.Module, norm_cfg: dict,
+                            in_channels: int) -> int:
         """make encoder layers using sparse convs.

         Args:

@@ -240,7 +257,8 @@ class SparseUNet(BaseModule):
         self.encoder_layers.add_module(stage_name, stage_layers)
         return out_channels

-    def make_decoder_layers(self, make_block, norm_cfg, in_channels):
+    def make_decoder_layers(self, make_block: nn.Module, norm_cfg: dict,
+                            in_channels: int) -> int:
         """make decoder layers using sparse convs.

         Args:
......
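For readers who want to see the typed middle-encoder entry points in context, below is a minimal usage sketch. It instantiates SparseEncoder directly with made-up voxel data; the sparse_shape and channel values echo a typical SECOND-style KITTI setting, and a CUDA build of the spconv/mmcv sparse ops is assumed. None of these values come from the diff itself:

import torch

from mmdet3d.models.middle_encoders import SparseEncoder

# in_channels/sparse_shape follow a common SECOND KITTI config (assumed).
encoder = SparseEncoder(in_channels=4, sparse_shape=[41, 1600, 1408]).cuda()

voxel_features = torch.rand(2000, 4).cuda()             # (num_voxels, C)
coors = torch.zeros(2000, 4, dtype=torch.int32).cuda()  # [batch_idx, z, y, x]
coors[:, 1:] = torch.randint(0, 40, (2000, 3), dtype=torch.int32).cuda()
spatial_features = encoder(voxel_features, coors, batch_size=1)
print(spatial_features.shape)  # e.g. (1, 256, 200, 176) with these settings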
@@ -7,12 +7,13 @@ import torch.nn as nn
 from mmcv.cnn import ConvModule
 from mmcv.ops.furthest_point_sample import furthest_point_sample
 from mmengine.model import BaseModule
+from torch import Tensor

 from mmdet3d.registry import MODELS
 from mmdet3d.utils import InstanceList

-def bilinear_interpolate_torch(inputs, x, y):
+def bilinear_interpolate_torch(inputs: Tensor, x: Tensor, y: Tensor) -> Tensor:
     """Bilinear interpolate for inputs."""
     x0 = torch.floor(x).long()
     x1 = x0 + 1
......
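The hunk above only annotates `bilinear_interpolate_torch`; for readers who have not met the helper, the sketch below re-implements the same idea, standard bilinear sampling of an (H, W, C) feature map at continuous coordinates, independently of mmdet3d. The function name and the (H, W, C) layout are assumptions for illustration, not a copy of the repository code:

import torch
from torch import Tensor


def bilinear_interpolate(inputs: Tensor, x: Tensor, y: Tensor) -> Tensor:
    """Sample an (H, W, C) map at N continuous (x, y) locations."""
    h, w = inputs.shape[0], inputs.shape[1]
    x0 = torch.clamp(torch.floor(x).long(), 0, w - 1)
    x1 = torch.clamp(x0 + 1, 0, w - 1)
    y0 = torch.clamp(torch.floor(y).long(), 0, h - 1)
    y1 = torch.clamp(y0 + 1, 0, h - 1)
    # Four neighbouring feature vectors, each of shape (N, C).
    ia, ib = inputs[y0, x0], inputs[y1, x0]
    ic, id_ = inputs[y0, x1], inputs[y1, x1]
    # Bilinear weights, unsqueezed to (N, 1) so they broadcast over channels.
    wa = ((x1.float() - x) * (y1.float() - y)).unsqueeze(1)
    wb = ((x1.float() - x) * (y - y0.float())).unsqueeze(1)
    wc = ((x - x0.float()) * (y1.float() - y)).unsqueeze(1)
    wd = ((x - x0.float()) * (y - y0.float())).unsqueeze(1)
    return ia * wa + ib * wb + ic * wc + id_ * wd


feats = torch.arange(12.0).reshape(3, 4, 1)  # toy (H=3, W=4, C=1) feature map
print(bilinear_interpolate(feats, torch.tensor([1.5]), torch.tensor([0.5])))
# Expected: tensor([[3.5000]])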
 # Copyright (c) OpenMMLab. All rights reserved.
+from typing import Optional, Tuple
 import torch
 from mmcv.cnn import build_norm_layer
 from mmcv.ops import DynamicScatter
-from torch import nn
+from torch import Tensor, nn

 from mmdet3d.registry import MODELS
 from .utils import PFNLayer, get_paddings_indicator

@@ -37,16 +39,18 @@ class PillarFeatureNet(nn.Module):
     """

     def __init__(self,
-                 in_channels=4,
-                 feat_channels=(64, ),
-                 with_distance=False,
-                 with_cluster_center=True,
-                 with_voxel_center=True,
-                 voxel_size=(0.2, 0.2, 4),
-                 point_cloud_range=(0, -40, -3, 70.4, 40, 1),
-                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
-                 mode='max',
-                 legacy=True):
+                 in_channels: Optional[int] = 4,
+                 feat_channels: Optional[tuple] = (64, ),
+                 with_distance: Optional[bool] = False,
+                 with_cluster_center: Optional[bool] = True,
+                 with_voxel_center: Optional[bool] = True,
+                 voxel_size: Optional[Tuple[float]] = (0.2, 0.2, 4),
+                 point_cloud_range: Optional[Tuple[float]] = (0, -40, -3, 70.4,
+                                                              40, 1),
+                 norm_cfg: Optional[dict] = dict(
+                     type='BN1d', eps=1e-3, momentum=0.01),
+                 mode: Optional[str] = 'max',
+                 legacy: Optional[bool] = True):
         super(PillarFeatureNet, self).__init__()
         assert len(feat_channels) > 0
         self.legacy = legacy

@@ -88,7 +92,8 @@ class PillarFeatureNet(nn.Module):
         self.z_offset = self.vz / 2 + point_cloud_range[2]
         self.point_cloud_range = point_cloud_range

-    def forward(self, features, num_points, coors, *args, **kwargs):
+    def forward(self, features: Tensor, num_points: Tensor, coors: Tensor,
+                *args, **kwargs) -> Tensor:
         """Forward function.

         Args:

@@ -187,16 +192,18 @@ class DynamicPillarFeatureNet(PillarFeatureNet):
     """

     def __init__(self,
-                 in_channels=4,
-                 feat_channels=(64, ),
-                 with_distance=False,
-                 with_cluster_center=True,
-                 with_voxel_center=True,
-                 voxel_size=(0.2, 0.2, 4),
-                 point_cloud_range=(0, -40, -3, 70.4, 40, 1),
-                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
-                 mode='max',
-                 legacy=True):
+                 in_channels: Optional[int] = 4,
+                 feat_channels: Optional[tuple] = (64, ),
+                 with_distance: Optional[bool] = False,
+                 with_cluster_center: Optional[bool] = True,
+                 with_voxel_center: Optional[bool] = True,
+                 voxel_size: Optional[Tuple[float]] = (0.2, 0.2, 4),
+                 point_cloud_range: Optional[Tuple[float]] = (0, -40, -3, 70.4,
+                                                              40, 1),
+                 norm_cfg: Optional[dict] = dict(
+                     type='BN1d', eps=1e-3, momentum=0.01),
+                 mode: Optional[str] = 'max',
+                 legacy: Optional[bool] = True):
         super(DynamicPillarFeatureNet, self).__init__(
             in_channels,
             feat_channels,

@@ -229,7 +236,8 @@ class DynamicPillarFeatureNet(PillarFeatureNet):
         self.cluster_scatter = DynamicScatter(
             voxel_size, point_cloud_range, average_points=True)

-    def map_voxel_center_to_point(self, pts_coors, voxel_mean, voxel_coors):
+    def map_voxel_center_to_point(self, pts_coors: Tensor, voxel_mean: Tensor,
+                                  voxel_coors: Tensor) -> Tensor:
         """Map the centers of voxels to its corresponding points.

         Args:

@@ -268,7 +276,7 @@ class DynamicPillarFeatureNet(PillarFeatureNet):
         center_per_point = canvas[:, voxel_index.long()].t()
         return center_per_point

-    def forward(self, features, coors):
+    def forward(self, features: Tensor, coors: Tensor) -> Tensor:
         """Forward function.

         Args:
......
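To see the annotated PillarFeatureNet at work, here is a small sketch with hard-voxelized dummy pillars. The voxel size, point-cloud range and grid extents mirror a common KITTI PointPillars setup but are assumptions for illustration, not values taken from the diff:

import torch

from mmdet3d.models.voxel_encoders import PillarFeatureNet

pfn = PillarFeatureNet(
    in_channels=4,
    feat_channels=(64, ),
    voxel_size=(0.16, 0.16, 4),
    point_cloud_range=(0, -39.68, -3, 69.12, 39.68, 1))

num_pillars, max_points = 100, 32
features = torch.rand(num_pillars, max_points, 4)       # padded points
num_points = torch.randint(1, max_points + 1, (num_pillars, ))
coors = torch.zeros(num_pillars, 4, dtype=torch.int32)  # [batch_idx, z, y, x]
coors[:, 2] = torch.randint(0, 496, (num_pillars, ), dtype=torch.int32)
coors[:, 3] = torch.randint(0, 432, (num_pillars, ), dtype=torch.int32)

pillar_feats = pfn(features, num_points, coors)
print(pillar_feats.shape)  # expected (num_pillars, 64)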
 # Copyright (c) OpenMMLab. All rights reserved.
+from typing import Optional
 import torch
 from mmcv.cnn import build_norm_layer
-from torch import nn
+from torch import Tensor, nn
 from torch.nn import functional as F

-def get_paddings_indicator(actual_num, max_num, axis=0):
+def get_paddings_indicator(actual_num: Tensor,
+                           max_num: Tensor,
+                           axis: int = 0) -> Tensor:
     """Create boolean mask by actual number of a padded tensor.

     Args:

@@ -46,11 +50,12 @@ class VFELayer(nn.Module):
     """

     def __init__(self,
-                 in_channels,
-                 out_channels,
-                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
-                 max_out=True,
-                 cat_max=True):
+                 in_channels: int,
+                 out_channels: int,
+                 norm_cfg: Optional[dict] = dict(
+                     type='BN1d', eps=1e-3, momentum=0.01),
+                 max_out: Optional[bool] = True,
+                 cat_max: Optional[bool] = True):
         super(VFELayer, self).__init__()
         self.cat_max = cat_max
         self.max_out = max_out

@@ -59,7 +64,7 @@ class VFELayer(nn.Module):
         self.norm = build_norm_layer(norm_cfg, out_channels)[1]
         self.linear = nn.Linear(in_channels, out_channels, bias=False)

-    def forward(self, inputs):
+    def forward(self, inputs: Tensor) -> Tensor:
         """Forward function.

         Args:

@@ -119,11 +124,12 @@ class PFNLayer(nn.Module):
     """

     def __init__(self,
-                 in_channels,
-                 out_channels,
-                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
-                 last_layer=False,
-                 mode='max'):
+                 in_channels: int,
+                 out_channels: int,
+                 norm_cfg: Optional[dict] = dict(
+                     type='BN1d', eps=1e-3, momentum=0.01),
+                 last_layer: Optional[bool] = False,
+                 mode: Optional[str] = 'max'):
         super().__init__()
         self.name = 'PFNLayer'

@@ -138,7 +144,10 @@ class PFNLayer(nn.Module):
         assert mode in ['max', 'avg']
         self.mode = mode

-    def forward(self, inputs, num_voxels=None, aligned_distance=None):
+    def forward(self,
+                inputs: Tensor,
+                num_voxels: Optional[Tensor] = None,
+                aligned_distance: Optional[Tensor] = None) -> Tensor:
         """Forward function.

         Args:
......
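The `get_paddings_indicator` helper annotated above produces the boolean mask that PillarFeatureNet and HardVFE use to zero out the padded point slots of each voxel. A tiny illustration of the expected behaviour (the printed mask assumes True marks real points and False marks padding):

import torch

from mmdet3d.models.voxel_encoders.utils import get_paddings_indicator

actual_num = torch.tensor([1, 3])  # real points in each of two padded voxels
mask = get_paddings_indicator(actual_num, 4)
print(mask)
# Expected:
# tensor([[ True, False, False, False],
#         [ True,  True,  True, False]])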
@@ -57,13 +57,14 @@ class DynamicSimpleVFE(nn.Module):
     """

     def __init__(self,
-                 voxel_size=(0.2, 0.2, 4),
-                 point_cloud_range=(0, -40, -3, 70.4, 40, 1)):
+                 voxel_size: Tuple[float] = (0.2, 0.2, 4),
+                 point_cloud_range: Tuple[float] = (0, -40, -3, 70.4, 40, 1)):
         super(DynamicSimpleVFE, self).__init__()
         self.scatter = DynamicScatter(voxel_size, point_cloud_range, True)

     @torch.no_grad()
-    def forward(self, features, coors, *args, **kwargs):
+    def forward(self, features: Tensor, coors: Tensor, *args,
+                **kwargs) -> Tensor:
         """Forward function.

         Args:

@@ -114,17 +115,17 @@ class DynamicVFE(nn.Module):
     """

     def __init__(self,
-                 in_channels=4,
-                 feat_channels=[],
-                 with_distance=False,
-                 with_cluster_center=False,
-                 with_voxel_center=False,
-                 voxel_size=(0.2, 0.2, 4),
-                 point_cloud_range=(0, -40, -3, 70.4, 40, 1),
-                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
-                 mode='max',
-                 fusion_layer=None,
-                 return_point_feats=False):
+                 in_channels: int = 4,
+                 feat_channels: list = [],
+                 with_distance: bool = False,
+                 with_cluster_center: bool = False,
+                 with_voxel_center: bool = False,
+                 voxel_size: Tuple[float] = (0.2, 0.2, 4),
+                 point_cloud_range: Tuple[float] = (0, -40, -3, 70.4, 40, 1),
+                 norm_cfg: dict = dict(type='BN1d', eps=1e-3, momentum=0.01),
+                 mode: str = 'max',
+                 fusion_layer: dict = None,
+                 return_point_feats: bool = False):
         super(DynamicVFE, self).__init__()
         assert mode in ['avg', 'max']
         assert len(feat_channels) > 0

@@ -171,7 +172,8 @@ class DynamicVFE(nn.Module):
         if fusion_layer is not None:
             self.fusion_layer = MODELS.build(fusion_layer)

-    def map_voxel_center_to_point(self, pts_coors, voxel_mean, voxel_coors):
+    def map_voxel_center_to_point(self, pts_coors: Tensor, voxel_mean: Tensor,
+                                  voxel_coors: Tensor) -> Tensor:
         """Map voxel features to its corresponding points.

         Args:

@@ -214,13 +216,13 @@ class DynamicVFE(nn.Module):
         return center_per_point

     def forward(self,
-                features,
-                coors,
-                points=None,
-                img_feats=None,
-                img_metas=None,
+                features: Tensor,
+                coors: Tensor,
+                points: Optional[Sequence[Tensor]] = None,
+                img_feats: Optional[Sequence[Tensor]] = None,
+                img_metas: Optional[dict] = None,
                 *args,
-                **kwargs):
+                **kwargs) -> tuple:
         """Forward functions.

         Args:

@@ -313,17 +315,17 @@ class HardVFE(nn.Module):
     """

     def __init__(self,
-                 in_channels=4,
-                 feat_channels=[],
-                 with_distance=False,
-                 with_cluster_center=False,
-                 with_voxel_center=False,
-                 voxel_size=(0.2, 0.2, 4),
-                 point_cloud_range=(0, -40, -3, 70.4, 40, 1),
-                 norm_cfg=dict(type='BN1d', eps=1e-3, momentum=0.01),
-                 mode='max',
-                 fusion_layer=None,
-                 return_point_feats=False):
+                 in_channels: int = 4,
+                 feat_channels: list = [],
+                 with_distance: bool = False,
+                 with_cluster_center: bool = False,
+                 with_voxel_center: bool = False,
+                 voxel_size: Tuple[float] = (0.2, 0.2, 4),
+                 point_cloud_range: Tuple[float] = (0, -40, -3, 70.4, 40, 1),
+                 norm_cfg: dict = dict(type='BN1d', eps=1e-3, momentum=0.01),
+                 mode: str = 'max',
+                 fusion_layer: dict = None,
+                 return_point_feats: bool = False):
         super(HardVFE, self).__init__()
         assert len(feat_channels) > 0
         if with_cluster_center:

@@ -379,13 +381,13 @@ class HardVFE(nn.Module):
             self.fusion_layer = MODELS.build(fusion_layer)

     def forward(self,
-                features,
-                num_points,
-                coors,
-                img_feats=None,
-                img_metas=None,
+                features: Tensor,
+                num_points: Tensor,
+                coors: Tensor,
+                img_feats: Optional[Sequence[Tensor]] = None,
+                img_metas: Optional[dict] = None,
                 *args,
-                **kwargs):
+                **kwargs) -> tuple:
         """Forward functions.

         Args:

@@ -448,8 +450,10 @@ class HardVFE(nn.Module):
         return voxel_feats

-    def fusion_with_mask(self, features, mask, voxel_feats, coors, img_feats,
-                         img_metas):
+    def fusion_with_mask(self, features: Tensor, mask: Tensor,
+                         voxel_feats: Tensor, coors: Tensor,
+                         img_feats: Sequence[Tensor],
+                         img_metas: Sequence[dict]) -> Tensor:
         """Fuse image and point features with mask.

         Args:
......
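Finally, a short sketch exercising the typed HardVFE entry point from the last file. The settings switch off the cluster-center, voxel-center and distance features so the example stays self-contained on CPU; every value here is an assumption for illustration only:

import torch

from mmdet3d.models.voxel_encoders import HardVFE

vfe = HardVFE(
    in_channels=4,
    feat_channels=[64],
    with_distance=False,
    with_cluster_center=False,
    with_voxel_center=False)

num_voxels, max_points = 50, 10
features = torch.rand(num_voxels, max_points, 4)       # padded points
num_points = torch.randint(1, max_points + 1, (num_voxels, ))
coors = torch.zeros(num_voxels, 4, dtype=torch.int32)  # [batch_idx, z, y, x]

voxel_feats = vfe(features, num_points, coors)
print(voxel_feats.shape)  # expected (num_voxels, 64)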