"git@developer.sourcefind.cn:ox696c/ktransformers.git" did not exist on "021822dd01b0ade6690ad358a46a4829de55ec84"
Unverified Commit 4e3cfbe4 authored by VVsssssk's avatar VVsssssk Committed by GitHub
Browse files

[Fix] Fix tests dir (#1704)

* format ut dir tree

* ad pytest skip
parent 4a3f90f6
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
def test_pointnet_fp_module():
    """Test PointFPModule forward on a SUN RGB-D point cloud sample.

    Requires CUDA; skipped otherwise. Checks the configured MLP channel
    sizes and the shape of the propagated features.
    """
    if not torch.cuda.is_available():
        pytest.skip()
    from mmdet3d.models.layers import PointFPModule

    self = PointFPModule(mlp_channels=[24, 16]).cuda()
    assert self.mlps.layer0.conv.in_channels == 24
    assert self.mlps.layer0.conv.out_channels == 16

    # assumes the fixture file stores 6 float32 values per point — TODO confirm
    xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin',
                      np.float32).reshape((-1, 6))

    # (B, N, 3)
    xyz1 = torch.from_numpy(xyz[0::2, :3]).view(1, -1, 3).cuda()
    # (B, C1, N)
    features1 = xyz1.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda()
    # (B, M, 3)
    xyz2 = torch.from_numpy(xyz[1::3, :3]).view(1, -1, 3).cuda()
    # (B, C2, N)
    features2 = xyz2.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda()

    fp_features = self(xyz1, xyz2, features1, features2)
    assert fp_features.shape == torch.Size([1, 16, 50])
...@@ -206,29 +206,3 @@ def test_pointnet_sa_module(): ...@@ -206,29 +206,3 @@ def test_pointnet_sa_module():
assert new_xyz.shape == torch.Size([1, 16, 3]) assert new_xyz.shape == torch.Size([1, 16, 3])
assert new_features.shape == torch.Size([1, 32, 16]) assert new_features.shape == torch.Size([1, 32, 16])
assert inds.shape == torch.Size([1, 16]) assert inds.shape == torch.Size([1, 16])
def test_pointnet_fp_module():
    """Test PointFPModule forward on a SUN RGB-D point cloud sample.

    Requires CUDA; skipped otherwise. Checks the configured MLP channel
    sizes and the shape of the propagated features.
    """
    if not torch.cuda.is_available():
        pytest.skip()
    from mmdet3d.models.layers import PointFPModule

    self = PointFPModule(mlp_channels=[24, 16]).cuda()
    assert self.mlps.layer0.conv.in_channels == 24
    assert self.mlps.layer0.conv.out_channels == 16

    # assumes the fixture file stores 6 float32 values per point — TODO confirm
    xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin',
                      np.float32).reshape((-1, 6))

    # (B, N, 3)
    xyz1 = torch.from_numpy(xyz[0::2, :3]).view(1, -1, 3).cuda()
    # (B, C1, N)
    features1 = xyz1.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda()
    # (B, M, 3)
    xyz2 = torch.from_numpy(xyz[1::3, :3]).view(1, -1, 3).cuda()
    # (B, C2, N)
    features2 = xyz2.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda()

    fp_features = self(xyz1, xyz2, features1, features2)
    assert fp_features.shape == torch.Size([1, 16, 50])
...@@ -6,56 +6,10 @@ from mmdet3d.models.layers import SparseBasicBlock ...@@ -6,56 +6,10 @@ from mmdet3d.models.layers import SparseBasicBlock
from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE
if IS_SPCONV2_AVAILABLE: if IS_SPCONV2_AVAILABLE:
from spconv.pytorch import (SparseConv3d, SparseConvTensor, from spconv.pytorch import (SparseConvTensor, SparseInverseConv3d,
SparseInverseConv3d, SubMConv3d) SubMConv3d)
else: else:
from mmcv.ops import (SparseConv3d, SparseConvTensor, SparseInverseConv3d, from mmcv.ops import SparseConvTensor, SparseInverseConv3d, SubMConv3d
SubMConv3d)
def test_SparseUNet():
    """Test the SparseUNet middle encoder on a tiny 4-voxel input.

    Requires CUDA; skipped otherwise. Verifies the structure of the encoder
    and decoder layers and the output feature shapes of a forward pass.
    """
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    from mmdet3d.models.middle_encoders.sparse_unet import SparseUNet

    self = SparseUNet(in_channels=4, sparse_shape=[41, 1600, 1408]).cuda()

    # test encoder layers
    assert len(self.encoder_layers) == 4
    assert self.encoder_layers.encoder_layer1[0][0].in_channels == 16
    assert self.encoder_layers.encoder_layer1[0][0].out_channels == 16
    assert isinstance(self.encoder_layers.encoder_layer1[0][0], SubMConv3d)
    assert isinstance(self.encoder_layers.encoder_layer1[0][1],
                      torch.nn.modules.batchnorm.BatchNorm1d)
    assert isinstance(self.encoder_layers.encoder_layer1[0][2],
                      torch.nn.modules.activation.ReLU)
    assert self.encoder_layers.encoder_layer4[0][0].in_channels == 64
    assert self.encoder_layers.encoder_layer4[0][0].out_channels == 64
    assert isinstance(self.encoder_layers.encoder_layer4[0][0], SparseConv3d)
    assert isinstance(self.encoder_layers.encoder_layer4[2][0], SubMConv3d)

    # test decoder layers
    assert isinstance(self.lateral_layer1, SparseBasicBlock)
    assert isinstance(self.merge_layer1[0], SubMConv3d)
    assert isinstance(self.upsample_layer1[0], SubMConv3d)
    assert isinstance(self.upsample_layer2[0], SparseInverseConv3d)

    voxel_features = torch.tensor(
        [[6.56126, 0.9648336, -1.7339306, 0.315],
         [6.8162713, -2.480431, -1.3616394, 0.36],
         [11.643568, -4.744306, -1.3580885, 0.16],
         [23.482342, 6.5036807, 0.5806964, 0.35]],
        dtype=torch.float32).cuda()  # n, point_features
    coordinates = torch.tensor(
        [[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232],
         [1, 35, 930, 469]],
        dtype=torch.int32).cuda()  # n, 4(batch, ind_x, ind_y, ind_z)

    unet_ret_dict = self.forward(voxel_features, coordinates, 2)
    seg_features = unet_ret_dict['seg_features']
    spatial_features = unet_ret_dict['spatial_features']

    assert seg_features.shape == torch.Size([4, 16])
    assert spatial_features.shape == torch.Size([2, 256, 200, 176])
def test_SparseBasicBlock(): def test_SparseBasicBlock():
......
# Copyright (c) OpenMMLab. All rights reserved. # Copyright (c) OpenMMLab. All rights reserved.
import random
import numpy as np
import pytest import pytest
import torch import torch
from torch import nn as nn
from mmdet3d.models.builder import build_loss
def set_random_seed(seed, deterministic=False):
    """Set random seed for ``random``, ``numpy`` and ``torch``.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set ``torch.backends.cudnn.deterministic``
            to True and ``torch.backends.cudnn.benchmark`` to False.
            Default: False.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # seeds all visible GPUs; a no-op when CUDA is unavailable
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def test_chamfer_disrance(): def test_chamfer_disrance():
...@@ -95,101 +70,3 @@ def test_chamfer_disrance(): ...@@ -95,101 +70,3 @@ def test_chamfer_disrance():
or torch.equal(indices1, indices1.new_tensor(expected_inds2))) or torch.equal(indices1, indices1.new_tensor(expected_inds2)))
assert (indices2 == indices2.new_tensor([[0, 0, 0, 0, 0], [0, 3, 6, 0, assert (indices2 == indices2.new_tensor([[0, 0, 0, 0, 0], [0, 3, 6, 0,
0]])).all() 0]])).all()
def test_paconv_regularization_loss():
    """Test PAConvRegularizationLoss over the PAConv layers of a toy model.

    Checks the reduction-name assertion and that 'mean', 'sum' and 'none'
    reductions are mutually consistent across the model's 3 PAConv layers.
    """
    from mmdet3d.models.layers import PAConv, PAConvCUDA
    from mmdet3d.models.losses import PAConvRegularizationLoss

    class ToyModel(nn.Module):

        def __init__(self):
            super(ToyModel, self).__init__()
            self.paconvs = nn.ModuleList()
            self.paconvs.append(PAConv(8, 16, 8))
            self.paconvs.append(PAConv(8, 16, 8, kernel_input='identity'))
            self.paconvs.append(PAConvCUDA(8, 16, 8))
            # non-PAConv layer: must be ignored by the regularization loss
            self.conv1 = nn.Conv1d(3, 8, 1)

    set_random_seed(0, True)
    model = ToyModel()

    # reduction should be in ['none', 'mean', 'sum']
    with pytest.raises(AssertionError):
        paconv_corr_loss = PAConvRegularizationLoss(reduction='l2')

    paconv_corr_loss = PAConvRegularizationLoss(reduction='mean')
    mean_corr_loss = paconv_corr_loss(model.modules())
    assert mean_corr_loss >= 0
    assert mean_corr_loss.requires_grad

    # 3 PAConv layers, so sum == mean * 3
    sum_corr_loss = paconv_corr_loss(model.modules(), reduction_override='sum')
    assert torch.allclose(sum_corr_loss, mean_corr_loss * 3)

    none_corr_loss = paconv_corr_loss(
        model.modules(), reduction_override='none')
    assert none_corr_loss.shape[0] == 3
    assert torch.allclose(none_corr_loss.mean(), mean_corr_loss)
def test_uncertain_smooth_l1_loss():
    """Test UncertainL1Loss and UncertainSmoothL1Loss against fixed values.

    Checks the reduction-name assertion and compares mean losses on fixed
    (pred, target, sigma) inputs with precomputed expected values.
    """
    from mmdet3d.models.losses import UncertainL1Loss, UncertainSmoothL1Loss

    # reduction should be in ['none', 'mean', 'sum']
    with pytest.raises(AssertionError):
        uncertain_l1_loss = UncertainL1Loss(reduction='l2')
    with pytest.raises(AssertionError):
        uncertain_smooth_l1_loss = UncertainSmoothL1Loss(reduction='l2')

    pred = torch.tensor([1.5783, 0.5972, 1.4821, 0.9488])
    target = torch.tensor([1.0813, -0.3466, -1.1404, -0.9665])
    sigma = torch.tensor([-1.0053, 0.4710, -1.7784, -0.8603])

    # test uncertain l1 loss
    uncertain_l1_loss_cfg = dict(
        type='UncertainL1Loss', alpha=1.0, reduction='mean', loss_weight=1.0)
    uncertain_l1_loss = build_loss(uncertain_l1_loss_cfg)
    mean_l1_loss = uncertain_l1_loss(pred, target, sigma)
    expected_l1_loss = torch.tensor(4.7069)
    assert torch.allclose(mean_l1_loss, expected_l1_loss, atol=1e-4)

    # test uncertain smooth l1 loss
    uncertain_smooth_l1_loss_cfg = dict(
        type='UncertainSmoothL1Loss',
        alpha=1.0,
        beta=0.5,
        reduction='mean',
        loss_weight=1.0)
    uncertain_smooth_l1_loss = build_loss(uncertain_smooth_l1_loss_cfg)
    mean_smooth_l1_loss = uncertain_smooth_l1_loss(pred, target, sigma)
    expected_smooth_l1_loss = torch.tensor(3.9795)
    assert torch.allclose(
        mean_smooth_l1_loss, expected_smooth_l1_loss, atol=1e-4)
def test_multibin_loss():
    """Test MultiBinLoss on fixed predictions against a precomputed value.

    Checks the reduction-name assertion and the loss value for a fixed
    (pred, target) pair with 4 direction bins.
    """
    from mmdet3d.models.losses import MultiBinLoss

    # reduction should be in ['none', 'mean', 'sum']
    with pytest.raises(AssertionError):
        multibin_loss = MultiBinLoss(reduction='l2')

    pred = torch.tensor([[
        0.81, 0.32, 0.78, 0.52, 0.24, 0.12, 0.32, 0.11, 1.20, 1.30, 0.20,
        0.11, 0.12, 0.11, 0.23, 0.31
    ],
                         [
                             0.02, 0.19, 0.78, 0.22, 0.31, 0.12, 0.22, 0.11,
                             1.20, 1.30, 0.45, 0.51, 0.12, 0.11, 0.13, 0.61
                         ]])
    target = torch.tensor([[1, 1, 0, 0, 2.14, 3.12, 0.68, -2.15],
                           [1, 1, 0, 0, 3.12, 3.12, 2.34, 1.23]])
    multibin_loss_cfg = dict(
        type='MultiBinLoss', reduction='none', loss_weight=1.0)
    multibin_loss = build_loss(multibin_loss_cfg)
    output_multibin_loss = multibin_loss(pred, target, num_dir_bins=4)
    expected_multibin_loss = torch.tensor(2.1120)
    assert torch.allclose(
        output_multibin_loss, expected_multibin_loss, atol=1e-4)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet3d.models.builder import build_loss
def test_multibin_loss():
    """Test MultiBinLoss on fixed predictions against a precomputed value.

    Checks the reduction-name assertion and the loss value for a fixed
    (pred, target) pair with 4 direction bins.
    """
    from mmdet3d.models.losses import MultiBinLoss

    # reduction should be in ['none', 'mean', 'sum']
    with pytest.raises(AssertionError):
        multibin_loss = MultiBinLoss(reduction='l2')

    pred = torch.tensor([[
        0.81, 0.32, 0.78, 0.52, 0.24, 0.12, 0.32, 0.11, 1.20, 1.30, 0.20,
        0.11, 0.12, 0.11, 0.23, 0.31
    ],
                         [
                             0.02, 0.19, 0.78, 0.22, 0.31, 0.12, 0.22, 0.11,
                             1.20, 1.30, 0.45, 0.51, 0.12, 0.11, 0.13, 0.61
                         ]])
    target = torch.tensor([[1, 1, 0, 0, 2.14, 3.12, 0.68, -2.15],
                           [1, 1, 0, 0, 3.12, 3.12, 2.34, 1.23]])
    multibin_loss_cfg = dict(
        type='MultiBinLoss', reduction='none', loss_weight=1.0)
    multibin_loss = build_loss(multibin_loss_cfg)
    output_multibin_loss = multibin_loss(pred, target, num_dir_bins=4)
    expected_multibin_loss = torch.tensor(2.1120)
    assert torch.allclose(
        output_multibin_loss, expected_multibin_loss, atol=1e-4)
# Copyright (c) OpenMMLab. All rights reserved.
import random
import numpy as np
import pytest
import torch
from torch import nn as nn
def set_random_seed(seed, deterministic=False):
    """Set random seed for ``random``, ``numpy`` and ``torch``.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set ``torch.backends.cudnn.deterministic``
            to True and ``torch.backends.cudnn.benchmark`` to False.
            Default: False.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # seeds all visible GPUs; a no-op when CUDA is unavailable
    torch.cuda.manual_seed_all(seed)
    if deterministic:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def test_paconv_regularization_loss():
    """Test PAConvRegularizationLoss over the PAConv layers of a toy model.

    Checks the reduction-name assertion and that 'mean', 'sum' and 'none'
    reductions are mutually consistent across the model's 3 PAConv layers.
    """
    from mmdet3d.models.layers import PAConv, PAConvCUDA
    from mmdet3d.models.losses import PAConvRegularizationLoss

    class ToyModel(nn.Module):

        def __init__(self):
            super(ToyModel, self).__init__()
            self.paconvs = nn.ModuleList()
            self.paconvs.append(PAConv(8, 16, 8))
            self.paconvs.append(PAConv(8, 16, 8, kernel_input='identity'))
            self.paconvs.append(PAConvCUDA(8, 16, 8))
            # non-PAConv layer: must be ignored by the regularization loss
            self.conv1 = nn.Conv1d(3, 8, 1)

    set_random_seed(0, True)
    model = ToyModel()

    # reduction should be in ['none', 'mean', 'sum']
    with pytest.raises(AssertionError):
        paconv_corr_loss = PAConvRegularizationLoss(reduction='l2')

    paconv_corr_loss = PAConvRegularizationLoss(reduction='mean')
    mean_corr_loss = paconv_corr_loss(model.modules())
    assert mean_corr_loss >= 0
    assert mean_corr_loss.requires_grad

    # 3 PAConv layers, so sum == mean * 3
    sum_corr_loss = paconv_corr_loss(model.modules(), reduction_override='sum')
    assert torch.allclose(sum_corr_loss, mean_corr_loss * 3)

    none_corr_loss = paconv_corr_loss(
        model.modules(), reduction_override='none')
    assert none_corr_loss.shape[0] == 3
    assert torch.allclose(none_corr_loss.mean(), mean_corr_loss)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet3d.models.builder import build_loss
def test_uncertain_smooth_l1_loss():
    """Test UncertainL1Loss and UncertainSmoothL1Loss against fixed values.

    Checks the reduction-name assertion and compares mean losses on fixed
    (pred, target, sigma) inputs with precomputed expected values.
    """
    from mmdet3d.models.losses import UncertainL1Loss, UncertainSmoothL1Loss

    # reduction should be in ['none', 'mean', 'sum']
    with pytest.raises(AssertionError):
        uncertain_l1_loss = UncertainL1Loss(reduction='l2')
    with pytest.raises(AssertionError):
        uncertain_smooth_l1_loss = UncertainSmoothL1Loss(reduction='l2')

    pred = torch.tensor([1.5783, 0.5972, 1.4821, 0.9488])
    target = torch.tensor([1.0813, -0.3466, -1.1404, -0.9665])
    sigma = torch.tensor([-1.0053, 0.4710, -1.7784, -0.8603])

    # test uncertain l1 loss
    uncertain_l1_loss_cfg = dict(
        type='UncertainL1Loss', alpha=1.0, reduction='mean', loss_weight=1.0)
    uncertain_l1_loss = build_loss(uncertain_l1_loss_cfg)
    mean_l1_loss = uncertain_l1_loss(pred, target, sigma)
    expected_l1_loss = torch.tensor(4.7069)
    assert torch.allclose(mean_l1_loss, expected_l1_loss, atol=1e-4)

    # test uncertain smooth l1 loss
    uncertain_smooth_l1_loss_cfg = dict(
        type='UncertainSmoothL1Loss',
        alpha=1.0,
        beta=0.5,
        reduction='mean',
        loss_weight=1.0)
    uncertain_smooth_l1_loss = build_loss(uncertain_smooth_l1_loss_cfg)
    mean_smooth_l1_loss = uncertain_smooth_l1_loss(pred, target, sigma)
    expected_smooth_l1_loss = torch.tensor(3.9795)
    assert torch.allclose(
        mean_smooth_l1_loss, expected_smooth_l1_loss, atol=1e-4)
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet3d.models.layers import SparseBasicBlock
from mmdet3d.models.layers.spconv import IS_SPCONV2_AVAILABLE
if IS_SPCONV2_AVAILABLE:
from spconv.pytorch import SparseConv3d, SparseInverseConv3d, SubMConv3d
else:
from mmcv.ops import SparseConv3d, SparseInverseConv3d, SubMConv3d
def test_SparseUNet():
    """Test the SparseUNet middle encoder on a tiny 4-voxel input.

    Requires CUDA; skipped otherwise. Verifies the structure of the encoder
    and decoder layers and the output feature shapes of a forward pass.
    """
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')
    from mmdet3d.models.middle_encoders.sparse_unet import SparseUNet

    self = SparseUNet(in_channels=4, sparse_shape=[41, 1600, 1408]).cuda()

    # test encoder layers
    assert len(self.encoder_layers) == 4
    assert self.encoder_layers.encoder_layer1[0][0].in_channels == 16
    assert self.encoder_layers.encoder_layer1[0][0].out_channels == 16
    assert isinstance(self.encoder_layers.encoder_layer1[0][0], SubMConv3d)
    assert isinstance(self.encoder_layers.encoder_layer1[0][1],
                      torch.nn.modules.batchnorm.BatchNorm1d)
    assert isinstance(self.encoder_layers.encoder_layer1[0][2],
                      torch.nn.modules.activation.ReLU)
    assert self.encoder_layers.encoder_layer4[0][0].in_channels == 64
    assert self.encoder_layers.encoder_layer4[0][0].out_channels == 64
    assert isinstance(self.encoder_layers.encoder_layer4[0][0], SparseConv3d)
    assert isinstance(self.encoder_layers.encoder_layer4[2][0], SubMConv3d)

    # test decoder layers
    assert isinstance(self.lateral_layer1, SparseBasicBlock)
    assert isinstance(self.merge_layer1[0], SubMConv3d)
    assert isinstance(self.upsample_layer1[0], SubMConv3d)
    assert isinstance(self.upsample_layer2[0], SparseInverseConv3d)

    voxel_features = torch.tensor(
        [[6.56126, 0.9648336, -1.7339306, 0.315],
         [6.8162713, -2.480431, -1.3616394, 0.36],
         [11.643568, -4.744306, -1.3580885, 0.16],
         [23.482342, 6.5036807, 0.5806964, 0.35]],
        dtype=torch.float32).cuda()  # n, point_features
    coordinates = torch.tensor(
        [[0, 12, 819, 131], [0, 16, 750, 136], [1, 16, 705, 232],
         [1, 35, 930, 469]],
        dtype=torch.int32).cuda()  # n, 4(batch, ind_x, ind_y, ind_z)

    unet_ret_dict = self.forward(voxel_features, coordinates, 2)
    seg_features = unet_ret_dict['seg_features']
    spatial_features = unet_ret_dict['spatial_features']

    assert seg_features.shape == torch.Size([4, 16])
    assert spatial_features.shape == torch.Size([2, 256, 200, 176])
# Copyright (c) OpenMMLab. All rights reserved. # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet3d.models.builder import build_backbone, build_neck
def test_centerpoint_fpn():
    """Test SECONDFPN output shapes in CenterPoint vs. original usage.

    Builds a SECOND backbone and two SECONDFPN necks (CenterPoint config
    with fractional upsample strides vs. the original config) and checks
    their output spatial resolutions differ as expected.
    """
    second_cfg = dict(
        type='SECOND',
        in_channels=64,
        out_channels=[64, 128, 256],
        layer_nums=[3, 5, 5],
        layer_strides=[2, 2, 2],
        norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
        conv_cfg=dict(type='Conv2d', bias=False))
    second = build_backbone(second_cfg)

    # centerpoint usage of fpn
    centerpoint_fpn_cfg = dict(
        type='SECONDFPN',
        in_channels=[64, 128, 256],
        out_channels=[128, 128, 128],
        upsample_strides=[0.5, 1, 2],
        norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
        upsample_cfg=dict(type='deconv', bias=False),
        use_conv_for_no_stride=True)

    # original usage of fpn
    fpn_cfg = dict(
        type='SECONDFPN',
        in_channels=[64, 128, 256],
        upsample_strides=[1, 2, 4],
        out_channels=[128, 128, 128])

    second_fpn = build_neck(fpn_cfg)
    centerpoint_second_fpn = build_neck(centerpoint_fpn_cfg)

    input = torch.rand([4, 64, 512, 512])
    sec_output = second(input)
    centerpoint_output = centerpoint_second_fpn(sec_output)
    second_output = second_fpn(sec_output)
    assert centerpoint_output[0].shape == torch.Size([4, 384, 128, 128])
    assert second_output[0].shape == torch.Size([4, 384, 256, 256])
def test_imvoxel_neck():
    """Test OutdoorImVoxelNeck output shape on a random voxel volume.

    Requires CUDA; skipped otherwise.
    """
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')

    neck_cfg = dict(
        type='OutdoorImVoxelNeck', in_channels=64, out_channels=256)
    neck = build_neck(neck_cfg).cuda()
    inputs = torch.rand([1, 64, 216, 248, 12], device='cuda')
    outputs = neck(inputs)
    assert outputs[0].shape == (1, 256, 248, 216)
def test_fp_neck():
    """Test PointNetFPNeck forward output shapes on random SA features.

    Requires CUDA; skipped otherwise.
    """
    if not torch.cuda.is_available():
        pytest.skip()

    xyzs = [16384, 4096, 1024, 256, 64]
    feat_channels = [1, 96, 256, 512, 1024]
    channel_num = 5

    sa_xyz = [torch.rand(3, xyzs[i], 3) for i in range(channel_num)]
    sa_features = [
        torch.rand(3, feat_channels[i], xyzs[i]) for i in range(channel_num)
    ]

    neck_cfg = dict(
        type='PointNetFPNeck',
        fp_channels=((1536, 512, 512), (768, 512, 512), (608, 256, 256),
                     (257, 128, 128)))

    neck = build_neck(neck_cfg)
    neck.init_weights()

    if torch.cuda.is_available():
        sa_xyz = [x.cuda() for x in sa_xyz]
        sa_features = [x.cuda() for x in sa_features]
        neck.cuda()

    feats_sa = {'sa_xyz': sa_xyz, 'sa_features': sa_features}
    outputs = neck(feats_sa)
    assert outputs['fp_xyz'].cpu().numpy().shape == (3, 16384, 3)
    assert outputs['fp_features'].detach().cpu().numpy().shape == (3, 128,
                                                                   16384)
def test_dla_neck(): def test_dla_neck():
......
import pytest
import torch
from mmdet3d.models.builder import build_neck
def test_imvoxel_neck():
    """Test OutdoorImVoxelNeck output shape on a random voxel volume.

    Requires CUDA; skipped otherwise.
    """
    if not torch.cuda.is_available():
        pytest.skip('test requires GPU and torch+cuda')

    neck_cfg = dict(
        type='OutdoorImVoxelNeck', in_channels=64, out_channels=256)
    neck = build_neck(neck_cfg).cuda()
    inputs = torch.rand([1, 64, 216, 248, 12], device='cuda')
    outputs = neck(inputs)
    assert outputs[0].shape == (1, 256, 248, 216)
import pytest
import torch
from mmdet3d.models.builder import build_neck
def test_pointnet2_fp_neck():
    """Test PointNetFPNeck forward output shapes on random SA features.

    Requires CUDA; skipped otherwise.
    """
    if not torch.cuda.is_available():
        pytest.skip()

    xyzs = [16384, 4096, 1024, 256, 64]
    feat_channels = [1, 96, 256, 512, 1024]
    channel_num = 5

    sa_xyz = [torch.rand(3, xyzs[i], 3) for i in range(channel_num)]
    sa_features = [
        torch.rand(3, feat_channels[i], xyzs[i]) for i in range(channel_num)
    ]

    neck_cfg = dict(
        type='PointNetFPNeck',
        fp_channels=((1536, 512, 512), (768, 512, 512), (608, 256, 256),
                     (257, 128, 128)))

    neck = build_neck(neck_cfg)
    neck.init_weights()

    if torch.cuda.is_available():
        sa_xyz = [x.cuda() for x in sa_xyz]
        sa_features = [x.cuda() for x in sa_features]
        neck.cuda()

    feats_sa = {'sa_xyz': sa_xyz, 'sa_features': sa_features}
    outputs = neck(feats_sa)
    assert outputs['fp_xyz'].cpu().numpy().shape == (3, 16384, 3)
    assert outputs['fp_features'].detach().cpu().numpy().shape == (3, 128,
                                                                   16384)
# Copyright (c) OpenMMLab. All rights reserved. # Copyright (c) OpenMMLab. All rights reserved.
import pytest import pytest
import torch
from mmdet3d.models.builder import build_backbone, build_neck
def test_secfpn(): def test_secfpn():
...@@ -36,3 +39,45 @@ def test_secfpn(): ...@@ -36,3 +39,45 @@ def test_secfpn():
) )
with pytest.raises(AssertionError): with pytest.raises(AssertionError):
build_neck(neck_cfg) build_neck(neck_cfg)
def test_centerpoint_fpn():
    """Test SECONDFPN output shapes in CenterPoint vs. original usage.

    Builds a SECOND backbone and two SECONDFPN necks (CenterPoint config
    with fractional upsample strides vs. the original config) and checks
    their output spatial resolutions differ as expected.
    """
    second_cfg = dict(
        type='SECOND',
        in_channels=64,
        out_channels=[64, 128, 256],
        layer_nums=[3, 5, 5],
        layer_strides=[2, 2, 2],
        norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
        conv_cfg=dict(type='Conv2d', bias=False))
    second = build_backbone(second_cfg)

    # centerpoint usage of fpn
    centerpoint_fpn_cfg = dict(
        type='SECONDFPN',
        in_channels=[64, 128, 256],
        out_channels=[128, 128, 128],
        upsample_strides=[0.5, 1, 2],
        norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
        upsample_cfg=dict(type='deconv', bias=False),
        use_conv_for_no_stride=True)

    # original usage of fpn
    fpn_cfg = dict(
        type='SECONDFPN',
        in_channels=[64, 128, 256],
        upsample_strides=[1, 2, 4],
        out_channels=[128, 128, 128])

    second_fpn = build_neck(fpn_cfg)
    centerpoint_second_fpn = build_neck(centerpoint_fpn_cfg)

    input = torch.rand([4, 64, 512, 512])
    sec_output = second(input)
    centerpoint_output = centerpoint_second_fpn(sec_output)
    second_output = second_fpn(sec_output)
    assert centerpoint_output[0].shape == torch.Size([4, 384, 128, 128])
    assert second_output[0].shape == torch.Size([4, 384, 256, 256])
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet3d.registry import TASK_UTILS
from mmdet3d.structures import LiDARInstance3DBoxes
def test_anchor_free_box_coder():
    """Test AnchorFreeBBoxCoder encode, decode and split_pred.

    Encodes fixed ground-truth boxes and compares targets with precomputed
    values, decodes fixed network outputs into boxes, and checks the shapes
    produced by split_pred on random predictions.
    """
    box_coder_cfg = dict(
        type='AnchorFreeBBoxCoder', num_dir_bins=12, with_rot=True)
    box_coder = TASK_UTILS.build(box_coder_cfg)

    # test encode
    gt_bboxes = LiDARInstance3DBoxes(
        [[2.1227e+00, 5.7951e+00, -9.9900e-01, 1.6736e+00, 4.2419e+00,
          1.5473e+00, -1.5501e+00],
         [1.1791e+01, 9.0276e+00, -8.5772e-01, 1.6210e+00, 3.5367e+00,
          1.4841e+00, -1.7369e+00],
         [2.3638e+01, 9.6997e+00, -5.6713e-01, 1.7578e+00, 4.6103e+00,
          1.5999e+00, -1.4556e+00]])
    gt_labels = torch.tensor([0, 0, 0])

    (center_targets, size_targets, dir_class_targets,
     dir_res_targets) = box_coder.encode(gt_bboxes, gt_labels)

    expected_center_target = torch.tensor([[2.1227, 5.7951, -0.2253],
                                           [11.7908, 9.0276, -0.1156],
                                           [23.6380, 9.6997, 0.2328]])
    expected_size_targets = torch.tensor([[0.8368, 2.1210, 0.7736],
                                          [0.8105, 1.7683, 0.7421],
                                          [0.8789, 2.3052, 0.8000]])
    expected_dir_class_target = torch.tensor([9, 9, 9])
    expected_dir_res_target = torch.tensor([0.0394, -0.3172, 0.2199])
    assert torch.allclose(center_targets, expected_center_target, atol=1e-4)
    assert torch.allclose(size_targets, expected_size_targets, atol=1e-4)
    assert torch.all(dir_class_targets == expected_dir_class_target)
    assert torch.allclose(dir_res_targets, expected_dir_res_target, atol=1e-3)

    # test decode
    center = torch.tensor([[[14.5954, 6.3312, 0.7671],
                            [67.5245, 22.4422, 1.5610],
                            [47.7693, -6.7980, 1.4395]]])
    size_res = torch.tensor([[[-1.0752, 1.8760, 0.7715],
                              [-0.8016, 1.1754, 0.0102],
                              [-1.2789, 0.5948, 0.4728]]])
    dir_class = torch.tensor(
        [[[0.1512, 1.7914, -1.7658, 2.1572, -0.9215, 1.2139, 0.1749, 0.8606,
           1.1743, -0.7679, -1.6005, 0.4623],
          [-0.3957, 1.2026, -1.2677, 1.3863, -0.5754, 1.7083, 0.2601, 0.1129,
           0.7146, -0.1367, -1.2892, -0.0083],
          [-0.8862, 1.2050, -1.3881, 1.6604, -0.9087, 1.1907, -0.0280, 0.2027,
           1.0644, -0.7205, -1.0738, 0.4748]]])
    dir_res = torch.tensor(
        [[[1.1151, 0.5535, -0.2053, -0.6582, -0.1616, -0.1821, 0.4675, 0.6621,
           0.8146, -0.0448, -0.7253, -0.7171],
          [0.7888, 0.2478, -0.1962, -0.7267, 0.0573, -0.2398, 0.6984, 0.5859,
           0.7507, -0.1980, -0.6538, -0.6602],
          [0.9039, 0.6109, 0.1960, -0.5016, 0.0551, -0.4086, 0.3398, 0.2759,
           0.7247, -0.0655, -0.5052, -0.9026]]])
    bbox_out = dict(
        center=center, size=size_res, dir_class=dir_class, dir_res=dir_res)

    bbox3d = box_coder.decode(bbox_out)
    expected_bbox3d = torch.tensor(
        [[[14.5954, 6.3312, 0.7671, 0.1000, 3.7521, 1.5429, 0.9126],
          [67.5245, 22.4422, 1.5610, 0.1000, 2.3508, 0.1000, 2.3782],
          [47.7693, -6.7980, 1.4395, 0.1000, 1.1897, 0.9456, 1.0692]]])
    assert torch.allclose(bbox3d, expected_bbox3d, atol=1e-4)

    # test split_pred
    cls_preds = torch.rand(2, 1, 256)
    reg_preds = torch.rand(2, 30, 256)
    base_xyz = torch.rand(2, 256, 3)
    results = box_coder.split_pred(cls_preds, reg_preds, base_xyz)
    obj_scores = results['obj_scores']
    center = results['center']
    center_offset = results['center_offset']
    dir_class = results['dir_class']
    dir_res_norm = results['dir_res_norm']
    dir_res = results['dir_res']
    size = results['size']
    assert obj_scores.shape == torch.Size([2, 1, 256])
    assert center.shape == torch.Size([2, 256, 3])
    assert center_offset.shape == torch.Size([2, 256, 3])
    assert dir_class.shape == torch.Size([2, 256, 12])
    assert dir_res_norm.shape == torch.Size([2, 256, 12])
    assert dir_res.shape == torch.Size([2, 256, 12])
    assert size.shape == torch.Size([2, 256, 3])
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet3d.registry import TASK_UTILS
def test_centerpoint_bbox_coder():
    """Test CenterPointBBoxCoder decode output shapes on random heatmaps.

    Decodes a batch of 2 random prediction maps and checks that each sample
    yields max_num (500) boxes with 9 box parameters each.
    """
    bbox_coder_cfg = dict(
        type='CenterPointBBoxCoder',
        post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
        max_num=500,
        score_threshold=0.1,
        pc_range=[-51.2, -51.2],
        out_size_factor=4,
        voxel_size=[0.2, 0.2])
    bbox_coder = TASK_UTILS.build(bbox_coder_cfg)

    batch_dim = torch.rand([2, 3, 128, 128])
    batch_hei = torch.rand([2, 1, 128, 128])
    batch_hm = torch.rand([2, 2, 128, 128])
    batch_reg = torch.rand([2, 2, 128, 128])
    batch_rotc = torch.rand([2, 1, 128, 128])
    batch_rots = torch.rand([2, 1, 128, 128])
    batch_vel = torch.rand([2, 2, 128, 128])

    temp = bbox_coder.decode(batch_hm, batch_rots, batch_rotc, batch_hei,
                             batch_dim, batch_vel, batch_reg, 5)
    for i in range(len(temp)):
        assert temp[i]['bboxes'].shape == torch.Size([500, 9])
        assert temp[i]['scores'].shape == torch.Size([500])
        assert temp[i]['labels'].shape == torch.Size([500])
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment