"git@developer.sourcefind.cn:OpenDAS/torch-sparce.git" did not exist on "70249ef73c91627ac4502218f6c05191a966e3e2"
Commit 37c87f22 authored by zhangwenwei's avatar zhangwenwei
Browse files

Merge branch 'clean_parta2_bbox_head_unittest' into 'master'

Clean parta2 bbox head unittest

See merge request open-mmlab/mmdet.3d!50
parents 19a56f6b 0e4dea21
import numpy as np import numpy as np
import torch import torch
import torch.nn as nn import torch.nn as nn
from mmcv.cnn import ConvModule, build_norm_layer, normal_init, xavier_init from mmcv.cnn import ConvModule, normal_init, xavier_init
import mmdet3d.ops.spconv as spconv import mmdet3d.ops.spconv as spconv
from mmdet3d.core import build_bbox_coder, multi_apply from mmdet3d.core import build_bbox_coder, multi_apply
from mmdet3d.core.bbox import box_torch_ops from mmdet3d.core.bbox import box_torch_ops
from mmdet3d.models.builder import build_loss from mmdet3d.models.builder import build_loss
from mmdet3d.ops import make_sparse_convmodule
from mmdet3d.ops.iou3d.iou3d_utils import (boxes3d_to_bev_torch_lidar, nms_gpu, from mmdet3d.ops.iou3d.iou3d_utils import (boxes3d_to_bev_torch_lidar, nms_gpu,
nms_normal_gpu) nms_normal_gpu)
from mmdet.models import HEADS from mmdet.models import HEADS
...@@ -78,19 +79,18 @@ class PartA2BboxHead(nn.Module): ...@@ -78,19 +79,18 @@ class PartA2BboxHead(nn.Module):
assert down_conv_channels[-1] == shared_fc_channels[0] assert down_conv_channels[-1] == shared_fc_channels[0]
# init layers # init layers
block = self.post_act_block
part_channel_last = part_in_channels part_channel_last = part_in_channels
part_conv = [] part_conv = []
for i, channel in enumerate(part_conv_channels): for i, channel in enumerate(part_conv_channels):
part_conv.append( part_conv.append(
block( make_sparse_convmodule(
part_channel_last, part_channel_last,
channel, channel,
3, 3,
padding=1, padding=1,
norm_cfg=norm_cfg, norm_cfg=norm_cfg,
indice_key=f'rcnn_part{i}')) indice_key=f'rcnn_part{i}',
conv_type='SubMConv3d'))
part_channel_last = channel part_channel_last = channel
self.part_conv = spconv.SparseSequential(*part_conv) self.part_conv = spconv.SparseSequential(*part_conv)
...@@ -98,13 +98,14 @@ class PartA2BboxHead(nn.Module): ...@@ -98,13 +98,14 @@ class PartA2BboxHead(nn.Module):
seg_conv = [] seg_conv = []
for i, channel in enumerate(seg_conv_channels): for i, channel in enumerate(seg_conv_channels):
seg_conv.append( seg_conv.append(
block( make_sparse_convmodule(
seg_channel_last, seg_channel_last,
channel, channel,
3, 3,
padding=1, padding=1,
norm_cfg=norm_cfg, norm_cfg=norm_cfg,
indice_key=f'rcnn_seg{i}')) indice_key=f'rcnn_seg{i}',
conv_type='SubMConv3d'))
seg_channel_last = channel seg_channel_last = channel
self.seg_conv = spconv.SparseSequential(*seg_conv) self.seg_conv = spconv.SparseSequential(*seg_conv)
...@@ -114,26 +115,28 @@ class PartA2BboxHead(nn.Module): ...@@ -114,26 +115,28 @@ class PartA2BboxHead(nn.Module):
merge_conv = [] merge_conv = []
for i, channel in enumerate(merge_conv_channels): for i, channel in enumerate(merge_conv_channels):
merge_conv.append( merge_conv.append(
block( make_sparse_convmodule(
merge_conv_channel_last, merge_conv_channel_last,
channel, channel,
3, 3,
padding=1, padding=1,
norm_cfg=norm_cfg, norm_cfg=norm_cfg,
indice_key=f'rcnn_down0')) indice_key=f'rcnn_down0',
conv_type='SubMConv3d'))
merge_conv_channel_last = channel merge_conv_channel_last = channel
down_conv_channel_last = merge_conv_channel_last down_conv_channel_last = merge_conv_channel_last
conv_down = [] conv_down = []
for i, channel in enumerate(down_conv_channels): for i, channel in enumerate(down_conv_channels):
conv_down.append( conv_down.append(
block( make_sparse_convmodule(
down_conv_channel_last, down_conv_channel_last,
channel, channel,
3, 3,
padding=1, padding=1,
norm_cfg=norm_cfg, norm_cfg=norm_cfg,
indice_key=f'rcnn_down1')) indice_key=f'rcnn_down1',
conv_type='SubMConv3d'))
down_conv_channel_last = channel down_conv_channel_last = channel
self.conv_down.add_module('merge_conv', self.conv_down.add_module('merge_conv',
...@@ -228,69 +231,6 @@ class PartA2BboxHead(nn.Module): ...@@ -228,69 +231,6 @@ class PartA2BboxHead(nn.Module):
normal_init(self.conv_reg[-1].conv, mean=0, std=0.001) normal_init(self.conv_reg[-1].conv, mean=0, std=0.001)
def post_act_block(self,
                   in_channels,
                   out_channels,
                   kernel_size,
                   indice_key,
                   stride=1,
                   padding=0,
                   conv_type='subm',
                   norm_cfg=None):
    """Make a post-activation sparse convolution block.

    Builds ``conv -> norm -> ReLU`` as a single
    ``spconv.SparseSequential`` module.

    Args:
        in_channels (int): the number of input channels
        out_channels (int): the number of out channels
        kernel_size (int): kernel size of convolution
        indice_key (str): the indice key used for sparse tensor
        stride (int): the stride of convolution; only used by the
            'spconv' conv type
        padding (int or list[int]): the padding number of input; only
            used by the 'spconv' conv type
        conv_type (str): conv type in 'subm', 'spconv' or 'inverseconv'
        norm_cfg (dict[str]): config of normalization layer

    Returns:
        spconv.SparseSequential: post activate sparse convolution block.
    """
    # TODO: clean post_act_block by existing bottlenecks.
    assert conv_type in ['subm', 'spconv', 'inverseconv']
    if conv_type == 'subm':
        # Submanifold conv: output sparsity pattern equals the input's.
        conv = spconv.SubMConv3d(
            in_channels,
            out_channels,
            kernel_size,
            bias=False,
            indice_key=indice_key)
    elif conv_type == 'spconv':
        # Regular sparse conv: the only variant that takes stride/padding.
        conv = spconv.SparseConv3d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            bias=False,
            indice_key=indice_key)
    else:
        # 'inverseconv' — guaranteed by the assert above; the previous
        # trailing `raise NotImplementedError` branch was unreachable.
        conv = spconv.SparseInverseConv3d(
            in_channels,
            out_channels,
            kernel_size,
            bias=False,
            indice_key=indice_key)
    # Shared norm + activation tail, previously duplicated in each branch.
    return spconv.SparseSequential(conv,
                                   build_norm_layer(norm_cfg, out_channels)[1],
                                   nn.ReLU(inplace=True))
def forward(self, seg_feats, part_feats): def forward(self, seg_feats, part_feats):
# (B * N, out_x, out_y, out_z, 4) # (B * N, out_x, out_y, out_z, 4)
rcnn_batch_size = part_feats.shape[0] rcnn_batch_size = part_feats.shape[0]
......
...@@ -6,6 +6,21 @@ from mmdet.models.backbones.resnet import BasicBlock, Bottleneck ...@@ -6,6 +6,21 @@ from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
class SparseBottleneck(Bottleneck, spconv.SparseModule): class SparseBottleneck(Bottleneck, spconv.SparseModule):
"""Sparse bottleneck block for PartA^2.
Bottleneck block implemented with submanifold sparse convolution.
Args:
inplanes (int): inplanes of block.
planes (int): planes of block.
stride (int): stride of the first block. Default: 1
downsample (None | Module): down sample module for block.
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
"""
expansion = 4 expansion = 4
def __init__(self, def __init__(self,
...@@ -15,10 +30,7 @@ class SparseBottleneck(Bottleneck, spconv.SparseModule): ...@@ -15,10 +30,7 @@ class SparseBottleneck(Bottleneck, spconv.SparseModule):
downsample=None, downsample=None,
conv_cfg=None, conv_cfg=None,
norm_cfg=None): norm_cfg=None):
"""Sparse bottleneck block for PartA^2.
Bottleneck block implemented with submanifold sparse convolution.
"""
spconv.SparseModule.__init__(self) spconv.SparseModule.__init__(self)
Bottleneck.__init__( Bottleneck.__init__(
self, self,
...@@ -53,6 +65,21 @@ class SparseBottleneck(Bottleneck, spconv.SparseModule): ...@@ -53,6 +65,21 @@ class SparseBottleneck(Bottleneck, spconv.SparseModule):
class SparseBasicBlock(BasicBlock, spconv.SparseModule): class SparseBasicBlock(BasicBlock, spconv.SparseModule):
"""Sparse basic block for PartA^2.
Sparse basic block implemented with submanifold sparse convolution.
Args:
inplanes (int): inplanes of block.
planes (int): planes of block.
stride (int): stride of the first block. Default: 1
downsample (None | Module): down sample module for block.
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
"""
expansion = 1 expansion = 1
def __init__(self, def __init__(self,
...@@ -62,10 +89,6 @@ class SparseBasicBlock(BasicBlock, spconv.SparseModule): ...@@ -62,10 +89,6 @@ class SparseBasicBlock(BasicBlock, spconv.SparseModule):
downsample=None, downsample=None,
conv_cfg=None, conv_cfg=None,
norm_cfg=None): norm_cfg=None):
"""Sparse basic block for PartA^2.
Sparse basic block implemented with submanifold sparse convolution.
"""
spconv.SparseModule.__init__(self) spconv.SparseModule.__init__(self)
BasicBlock.__init__( BasicBlock.__init__(
self, self,
...@@ -125,6 +148,7 @@ def make_sparse_convmodule(in_channels, ...@@ -125,6 +148,7 @@ def make_sparse_convmodule(in_channels,
spconv.SparseSequential: sparse convolution module. spconv.SparseSequential: sparse convolution module.
""" """
assert isinstance(order, tuple) and len(order) <= 3 assert isinstance(order, tuple) and len(order) <= 3
assert set(order) | {'conv', 'norm', 'act'} == {'conv', 'norm', 'act'}
conv_cfg = dict(type=conv_type, indice_key=indice_key) conv_cfg = dict(type=conv_type, indice_key=indice_key)
......
This diff is collapsed.
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment