Commit 0be27ffb authored by Jingwei Zhang, committed by ZwwWayne

[Refactor] Refactor FCAF3D (#1945)

* add fcaf3d config

* support inference of fcaf3d on scannet-3d

* minor changes

* fix config of scannet

* align test precision of fcaf3d && support training

* add ut

* fix bugs of dataset conversion and indoor_metric

* support fcaf3d on sunrgbd datasets and add rotated_iou_loss ut

* small fix

* add docstring and typehint

* add typehint for SparseTensor

* remove pdb

* fix fcaf3d ut
parent 1e0e50d5
......@@ -8,7 +8,7 @@ metainfo = dict(
'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',
'garbagebin'))
file_client_args = dict(backend='disk')
# file_client_args = dict(backend='disk')
# Uncomment the following if using Ceph or other file clients.
# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
# for more details.
......
model = dict(
type='MinkSingleStage3DDetector',
data_preprocessor=dict(type='Det3DDataPreprocessor'),
backbone=dict(type='MinkResNet', in_channels=3, depth=34),
bbox_head=dict(
type='FCAF3DHead',
in_channels=(64, 128, 256, 512),
out_channels=128,
voxel_size=.01,
pts_prune_threshold=100000,
pts_assign_threshold=27,
pts_center_threshold=18,
num_classes=18,
num_reg_outs=6,
center_loss=dict(type='mmdet.CrossEntropyLoss', use_sigmoid=True),
bbox_loss=dict(type='AxisAlignedIoULoss'),
cls_loss=dict(type='mmdet.FocalLoss'),
),
train_cfg=dict(),
test_cfg=dict(nms_pre=1000, iou_thr=.5, score_thr=.01))
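# Illustrative sketch, not part of this commit: building the detector defined
# by this base config, assuming mmdet3d (dev-1.x) and MinkowskiEngine are
# installed; the config path below matches the one used in the unit tests.
from mmengine.config import Config
from mmengine.registry import init_default_scope

from mmdet3d.registry import MODELS

cfg = Config.fromfile('configs/fcaf3d/fcaf3d_8xb2_scannet-3d-18class.py')
init_default_scope('mmdet3d')  # make unprefixed registry names resolve in the mmdet3d scope
detector = MODELS.build(cfg.model)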
_base_ = [
'../_base_/models/fcaf3d.py', '../_base_/default_runtime.py',
'../_base_/datasets/scannet-3d.py'
]
n_points = 100000
train_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(type='LoadAnnotations3D'),
dict(type='GlobalAlignment', rotation_axis=2),
dict(type='PointSample', num_points=n_points),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=0.5,
flip_ratio_bev_vertical=0.5),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.087266, 0.087266],
scale_ratio_range=[.9, 1.1],
translation_std=[.1, .1, .1],
shift_height=False),
dict(type='NormalizePointsColor', color_mean=None),
dict(
type='Pack3DDetInputs',
keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
test_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(type='GlobalAlignment', rotation_axis=2),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=0.5,
flip_ratio_bev_vertical=0.5),
dict(type='PointSample', num_points=n_points),
dict(type='NormalizePointsColor', color_mean=None),
]),
dict(type='Pack3DDetInputs', keys=['points'])
]
train_dataloader = dict(
dataset=dict(
type='RepeatDataset',
times=10,
dataset=dict(pipeline=train_pipeline, filter_empty_gt=True)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.001, weight_decay=0.0001),
clip_grad=dict(max_norm=10, norm_type=2))
# learning rate
param_scheduler = dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
custom_hooks = [dict(type='EmptyCacheHook', after_iter=True)]
# training schedule for 1x
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=12)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
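# Illustrative sketch, not part of this commit: training with the ScanNet
# config above via the MMEngine runner, assuming the ScanNet data has been
# prepared as described in the mmdet3d docs; the work_dir value is hypothetical.
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile('configs/fcaf3d/fcaf3d_8xb2_scannet-3d-18class.py')
cfg.work_dir = './work_dirs/fcaf3d_scannet'  # hypothetical output directory
runner = Runner.from_cfg(cfg)
runner.train()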
_base_ = [
'../_base_/models/fcaf3d.py', '../_base_/default_runtime.py',
'../_base_/datasets/sunrgbd-3d.py'
]
n_points = 100000
model = dict(
bbox_head=dict(
num_classes=10,
num_reg_outs=8,
bbox_loss=dict(type='RotatedIoU3DLoss')))
train_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(type='LoadAnnotations3D'),
dict(type='PointSample', num_points=n_points),
dict(type='RandomFlip3D', sync_2d=False, flip_ratio_bev_horizontal=0.5),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.523599, 0.523599],
scale_ratio_range=[0.85, 1.15],
translation_std=[.1, .1, .1],
shift_height=False),
dict(
type='Pack3DDetInputs',
keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
test_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(
type='RandomFlip3D',
sync_2d=False,
flip_ratio_bev_horizontal=0.5,
flip_ratio_bev_vertical=0.5),
dict(type='PointSample', num_points=n_points)
]),
dict(type='Pack3DDetInputs', keys=['points'])
]
train_dataloader = dict(
batch_size=8,
dataset=dict(
type='RepeatDataset',
times=3,
dataset=dict(pipeline=train_pipeline, filter_empty_gt=True)))
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
optim_wrapper = dict(
type='OptimWrapper',
optimizer=dict(type='AdamW', lr=0.001, weight_decay=0.0001),
clip_grad=dict(max_norm=10, norm_type=2))
# learning rate
param_scheduler = dict(
type='MultiStepLR',
begin=0,
end=12,
by_epoch=True,
milestones=[8, 11],
gamma=0.1)
custom_hooks = [dict(type='EmptyCacheHook', after_iter=True)]
# training schedule for 1x
train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=12)
val_cfg = dict(type='ValLoop')
test_cfg = dict(type='TestLoop')
......@@ -78,8 +78,9 @@ class IndoorMetric(BaseMetric):
ann_infos.append(eval_ann)
pred_results.append(sinlge_pred_results)
# some checkpoints may not record the key "box_type_3d"
box_type_3d, box_mode_3d = get_box_type(
self.dataset_meta['box_type_3d'])
self.dataset_meta.get('box_type_3d', 'depth'))
ret_dict = indoor_eval(
ann_infos,
......
......@@ -58,7 +58,7 @@ class MinkResNet(nn.Module):
for i, num_blocks in enumerate(stage_blocks):
setattr(
self, f'layer{i}',
self, f'layer{i + 1}',
self._make_layer(block, 64 * 2**i, stage_blocks[i], stride=2))
def init_weights(self):
......@@ -111,6 +111,6 @@ class MinkResNet(nn.Module):
x = self.maxpool(x)
outs = []
for i in range(self.num_stages):
x = getattr(self, f'layer{i}')(x)
x = getattr(self, f'layer{i + 1}')(x)
outs.append(x)
return outs
......@@ -5,6 +5,7 @@ from .base_3d_dense_head import Base3DDenseHead
from .base_conv_bbox_head import BaseConvBboxHead
from .base_mono3d_dense_head import BaseMono3DDenseHead
from .centerpoint_head import CenterHead
from .fcaf3d_head import FCAF3DHead
from .fcos_mono3d_head import FCOSMono3DHead
from .free_anchor3d_head import FreeAnchor3DHead
from .groupfree3d_head import GroupFree3DHead
......@@ -22,5 +23,5 @@ __all__ = [
'SSD3DHead', 'BaseConvBboxHead', 'CenterHead', 'ShapeAwareHead',
'BaseMono3DDenseHead', 'AnchorFreeMono3DHead', 'FCOSMono3DHead',
'GroupFree3DHead', 'PointRPNHead', 'SMOKEMono3DHead', 'PGDHead',
'MonoFlexHead', 'Base3DDenseHead'
'MonoFlexHead', 'Base3DDenseHead', 'FCAF3DHead'
]
# Copyright (c) OpenMMLab. All rights reserved.
# Adapted from https://github.com/SamsungLabs/fcaf3d/blob/master/mmdet3d/models/dense_heads/fcaf3d_neck_with_head.py # noqa
from typing import List, Optional, Tuple
try:
import MinkowskiEngine as ME
from MinkowskiEngine import SparseTensor
except ImportError:
# Please follow getting_started.md to install MinkowskiEngine.
SparseTensor = None
pass
import torch
from mmcv.cnn import Scale
from mmcv.ops import nms3d, nms3d_normal
from mmengine.model import bias_init_with_prob
from mmengine.structures import InstanceData
from torch import Tensor, nn
from mmdet3d.models import HEADS, build_loss
from mmdet3d.structures import BaseInstance3DBoxes, rotation_3d_in_axis
from mmdet3d.utils import InstanceList, OptInstanceList
from mmdet.utils import reduce_mean
from .base_3d_dense_head import Base3DDenseHead
@HEADS.register_module()
class FCAF3DHead(Base3DDenseHead):
r"""Bbox head of `FCAF3D <https://arxiv.org/abs/2112.00322>`_.
This class actually contains both the sparse 3D FPN (the neck) and the head.
They cannot be simply separated, because the pruning score on the i-th FPN
level requires the classification scores from the (i+1)-th level of the head.
Args:
num_classes (int): Number of classes.
in_channels (tuple[int]): Number of channels in input tensors.
out_channels (int): Number of channels in the neck output tensors.
num_reg_outs (int): Number of regression layer channels.
voxel_size (float): Voxel size in meters.
pts_prune_threshold (int): Pruning threshold on each feature level.
pts_assign_threshold (int): Box to location assigner parameter.
Assigner selects the maximum feature level with more locations
inside the box than pts_assign_threshold.
pts_center_threshold (int): Box to location assigner parameter.
After feature level for the box is determined, assigner selects
pts_center_threshold locations closest to the box center.
center_loss (dict, optional): Config of centerness loss.
bbox_loss (dict, optional): Config of bbox loss.
cls_loss (dict, optional): Config of classification loss.
train_cfg (dict, optional): Config for train stage. Defaults to None.
test_cfg (dict, optional): Config for test stage. Defaults to None.
init_cfg (dict, optional): Config for weight initialization.
Defaults to None.
"""
def __init__(self,
num_classes: int,
in_channels: Tuple[int],
out_channels: int,
num_reg_outs: int,
voxel_size: float,
pts_prune_threshold: int,
pts_assign_threshold: int,
pts_center_threshold: int,
center_loss: dict = dict(
type='mmdet.CrossEntropyLoss', use_sigmoid=True),
bbox_loss: dict = dict(type='AxisAlignedIoULoss'),
cls_loss: dict = dict(type='mmdet.FocalLoss'),
train_cfg: Optional[dict] = None,
test_cfg: Optional[dict] = None,
init_cfg: Optional[dict] = None):
super(FCAF3DHead, self).__init__(init_cfg)
self.voxel_size = voxel_size
self.pts_prune_threshold = pts_prune_threshold
self.pts_assign_threshold = pts_assign_threshold
self.pts_center_threshold = pts_center_threshold
self.center_loss = build_loss(center_loss)
self.bbox_loss = build_loss(bbox_loss)
self.cls_loss = build_loss(cls_loss)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self._init_layers(in_channels, out_channels, num_reg_outs, num_classes)
@staticmethod
def _make_block(in_channels: int, out_channels: int) -> nn.Module:
"""Construct Conv-Norm-Act block.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
Returns:
torch.nn.Module: The resulting block.
"""
return nn.Sequential(
ME.MinkowskiConvolution(
in_channels, out_channels, kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(out_channels), ME.MinkowskiELU())
@staticmethod
def _make_up_block(in_channels: int, out_channels: int) -> nn.Module:
"""Construct DeConv-Norm-Act-Conv-Norm-Act block.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
Returns:
torch.nn.Module: The resulting block.
"""
return nn.Sequential(
ME.MinkowskiGenerativeConvolutionTranspose(
in_channels,
out_channels,
kernel_size=2,
stride=2,
dimension=3), ME.MinkowskiBatchNorm(out_channels),
ME.MinkowskiELU(),
ME.MinkowskiConvolution(
out_channels, out_channels, kernel_size=3, dimension=3),
ME.MinkowskiBatchNorm(out_channels), ME.MinkowskiELU())
def _init_layers(self, in_channels: Tuple[int], out_channels: int,
num_reg_outs: int, num_classes: int):
"""Initialize layers.
Args:
in_channels (tuple[int]): Number of channels in input tensors.
out_channels (int): Number of channels in the neck output tensors.
num_reg_outs (int): Number of regression layer channels.
num_classes (int): Number of classes.
"""
# neck layers
self.pruning = ME.MinkowskiPruning()
for i in range(len(in_channels)):
if i > 0:
self.__setattr__(
f'up_block_{i}',
self._make_up_block(in_channels[i], in_channels[i - 1]))
self.__setattr__(f'out_block_{i}',
self._make_block(in_channels[i], out_channels))
# head layers
self.conv_center = ME.MinkowskiConvolution(
out_channels, 1, kernel_size=1, dimension=3)
self.conv_reg = ME.MinkowskiConvolution(
out_channels, num_reg_outs, kernel_size=1, dimension=3)
self.conv_cls = ME.MinkowskiConvolution(
out_channels, num_classes, kernel_size=1, bias=True, dimension=3)
self.scales = nn.ModuleList(
[Scale(1.) for _ in range(len(in_channels))])
def init_weights(self):
"""Initialize weights."""
nn.init.normal_(self.conv_center.kernel, std=.01)
nn.init.normal_(self.conv_reg.kernel, std=.01)
nn.init.normal_(self.conv_cls.kernel, std=.01)
nn.init.constant_(self.conv_cls.bias, bias_init_with_prob(.01))
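# Note: bias_init_with_prob(.01) sets the classification bias so that the
# initial sigmoid output is roughly 0.01, the standard focal-loss prior.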
def forward(self, x: List[Tensor]) -> Tuple[List[Tensor], ...]:
"""Forward pass.
Args:
x (list[Tensor]): Features from the backbone.
Returns:
Tuple[List[Tensor], ...]: Predictions of the head.
"""
center_preds, bbox_preds, cls_preds, points = [], [], [], []
inputs = x
x = inputs[-1]
prune_score = None
for i in range(len(inputs) - 1, -1, -1):
if i < len(inputs) - 1:
x = self.__getattr__(f'up_block_{i + 1}')(x)
x = inputs[i] + x
x = self._prune(x, prune_score)
out = self.__getattr__(f'out_block_{i}')(x)
center_pred, bbox_pred, cls_pred, point, prune_score = \
self._forward_single(out, self.scales[i])
center_preds.append(center_pred)
bbox_preds.append(bbox_pred)
cls_preds.append(cls_pred)
points.append(point)
return center_preds[::-1], bbox_preds[::-1], cls_preds[::-1], \
points[::-1]
def _prune(self, x: SparseTensor, scores: SparseTensor) -> SparseTensor:
"""Prunes the tensor by score thresholding.
Args:
x (SparseTensor): Tensor to be pruned.
scores (SparseTensor): Scores for thresholding.
Returns:
SparseTensor: Pruned tensor.
"""
with torch.no_grad():
coordinates = x.C.float()
interpolated_scores = scores.features_at_coordinates(coordinates)
prune_mask = interpolated_scores.new_zeros(
(len(interpolated_scores)), dtype=torch.bool)
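# decomposition_permutations yields the row indices belonging to each scene
# in the batch; at most pts_prune_threshold top-scoring locations are kept
# per scene.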
for permutation in x.decomposition_permutations:
score = interpolated_scores[permutation]
mask = score.new_zeros((len(score)), dtype=torch.bool)
topk = min(len(score), self.pts_prune_threshold)
ids = torch.topk(score.squeeze(1), topk, sorted=False).indices
mask[ids] = True
prune_mask[permutation[mask]] = True
x = self.pruning(x, prune_mask)
return x
def _forward_single(self, x: SparseTensor,
scale: Scale) -> Tuple[Tensor, ...]:
"""Forward pass per level.
Args:
x (SparseTensor): Per level neck output tensor.
scale (mmcv.cnn.Scale): Per level multiplication weight.
Returns:
tuple[Tensor]: Per level head predictions.
"""
center_pred = self.conv_center(x).features
scores = self.conv_cls(x)
cls_pred = scores.features
prune_scores = ME.SparseTensor(
scores.features.max(dim=1, keepdim=True).values,
coordinate_map_key=scores.coordinate_map_key,
coordinate_manager=scores.coordinate_manager)
reg_final = self.conv_reg(x).features
reg_distance = torch.exp(scale(reg_final[:, :6]))
reg_angle = reg_final[:, 6:]
bbox_pred = torch.cat((reg_distance, reg_angle), dim=1)
center_preds, bbox_preds, cls_preds, points = [], [], [], []
for permutation in x.decomposition_permutations:
center_preds.append(center_pred[permutation])
bbox_preds.append(bbox_pred[permutation])
cls_preds.append(cls_pred[permutation])
points = x.decomposed_coordinates
for i in range(len(points)):
points[i] = points[i] * self.voxel_size
return center_preds, bbox_preds, cls_preds, points, prune_scores
def _loss_by_feat_single(self, center_preds: List[Tensor],
bbox_preds: List[Tensor], cls_preds: List[Tensor],
points: List[Tensor],
gt_bboxes: BaseInstance3DBoxes, gt_labels: Tensor,
input_meta: dict) -> Tuple[Tensor, ...]:
"""Loss function of single sample.
Args:
center_preds (list[Tensor]): Centerness predictions for all levels.
bbox_preds (list[Tensor]): Bbox predictions for all levels.
cls_preds (list[Tensor]): Classification predictions for all
levels.
points (list[Tensor]): Final location coordinates for all levels.
gt_bboxes (:obj:`BaseInstance3DBoxes`): Ground truth boxes.
gt_labels (Tensor): Ground truth labels.
input_meta (dict): Scene meta info.
Returns:
tuple[Tensor, ...]: Centerness, bbox, and classification loss
values.
"""
center_targets, bbox_targets, cls_targets = self.get_targets(
points, gt_bboxes, gt_labels)
center_preds = torch.cat(center_preds)
bbox_preds = torch.cat(bbox_preds)
cls_preds = torch.cat(cls_preds)
points = torch.cat(points)
# cls loss
pos_inds = torch.nonzero(cls_targets >= 0).squeeze(1)
n_pos = points.new_tensor(len(pos_inds))
n_pos = max(reduce_mean(n_pos), 1.)
cls_loss = self.cls_loss(cls_preds, cls_targets, avg_factor=n_pos)
# bbox and centerness losses
pos_center_preds = center_preds[pos_inds]
pos_bbox_preds = bbox_preds[pos_inds]
pos_center_targets = center_targets[pos_inds].unsqueeze(1)
pos_bbox_targets = bbox_targets[pos_inds]
# reduce_mean is outside if / else block to prevent deadlock
center_denorm = max(
reduce_mean(pos_center_targets.sum().detach()), 1e-6)
if len(pos_inds) > 0:
pos_points = points[pos_inds]
center_loss = self.center_loss(
pos_center_preds, pos_center_targets, avg_factor=n_pos)
bbox_loss = self.bbox_loss(
self._bbox_to_loss(
self._bbox_pred_to_bbox(pos_points, pos_bbox_preds)),
self._bbox_to_loss(pos_bbox_targets),
weight=pos_center_targets.squeeze(1),
avg_factor=center_denorm)
else:
center_loss = pos_center_preds.sum()
bbox_loss = pos_bbox_preds.sum()
return center_loss, bbox_loss, cls_loss
def loss_by_feat(self,
center_preds: List[List[Tensor]],
bbox_preds: List[List[Tensor]],
cls_preds: List[List[Tensor]],
points: List[List[Tensor]],
batch_gt_instances_3d: InstanceList,
batch_input_metas: List[dict],
batch_gt_instances_ignore: OptInstanceList = None,
**kwargs) -> dict:
"""Loss function about feature.
Args:
center_preds (list[list[Tensor]]): Centerness predictions for
all scenes. The first list contains predictions from different
levels. The second list contains predictions in a mini-batch.
bbox_preds (list[list[Tensor]]): Bbox predictions for all scenes.
The first list contains predictions from different
levels. The second list contains predictions in a mini-batch.
cls_preds (list[list[Tensor]]): Classification predictions for all
scenes. The first list contains predictions from different
levels. The second list contains predictions in a mini-batch.
points (list[list[Tensor]]): Final location coordinates for all
scenes. The first list contains predictions from different
levels. The second list contains predictions in a mini-batch.
batch_gt_instances_3d (list[:obj:`InstanceData`]): Batch of
gt_instance_3d. It usually includes ``bboxes_3d``, ``labels_3d``,
``depths``, ``centers_2d`` and other attributes.
batch_input_metas (list[dict]): Meta information of each sample,
e.g., image size, scaling factor, etc.
batch_gt_instances_ignore (list[:obj:`InstanceData`], Optional):
Batch of gt_instances_ignore. It includes ``bboxes`` attribute
data that is ignored during training and testing.
Defaults to None.
Returns:
dict: Centerness, bbox, and classification losses.
"""
center_losses, bbox_losses, cls_losses = [], [], []
for i in range(len(batch_input_metas)):
center_loss, bbox_loss, cls_loss = self._loss_by_feat_single(
center_preds=[x[i] for x in center_preds],
bbox_preds=[x[i] for x in bbox_preds],
cls_preds=[x[i] for x in cls_preds],
points=[x[i] for x in points],
input_meta=batch_input_metas[i],
gt_bboxes=batch_gt_instances_3d[i].bboxes_3d,
gt_labels=batch_gt_instances_3d[i].labels_3d)
center_losses.append(center_loss)
bbox_losses.append(bbox_loss)
cls_losses.append(cls_loss)
return dict(
center_loss=torch.mean(torch.stack(center_losses)),
bbox_loss=torch.mean(torch.stack(bbox_losses)),
cls_loss=torch.mean(torch.stack(cls_losses)))
def _predict_by_feat_single(self, center_preds: List[Tensor],
bbox_preds: List[Tensor],
cls_preds: List[Tensor], points: List[Tensor],
input_meta: dict) -> InstanceData:
"""Generate boxes for single sample.
Args:
center_preds (list[Tensor]): Centerness predictions for all levels.
bbox_preds (list[Tensor]): Bbox predictions for all levels.
cls_preds (list[Tensor]): Classification predictions for all
levels.
points (list[Tensor]): Final location coordinates for all levels.
input_meta (dict): Scene meta info.
Returns:
InstanceData: Predicted bounding boxes, scores and labels.
"""
mlvl_bboxes, mlvl_scores = [], []
for center_pred, bbox_pred, cls_pred, point in zip(
center_preds, bbox_preds, cls_preds, points):
scores = cls_pred.sigmoid() * center_pred.sigmoid()
max_scores, _ = scores.max(dim=1)
if len(scores) > self.test_cfg.nms_pre > 0:
_, ids = max_scores.topk(self.test_cfg.nms_pre)
bbox_pred = bbox_pred[ids]
scores = scores[ids]
point = point[ids]
bboxes = self._bbox_pred_to_bbox(point, bbox_pred)
mlvl_bboxes.append(bboxes)
mlvl_scores.append(scores)
bboxes = torch.cat(mlvl_bboxes)
scores = torch.cat(mlvl_scores)
bboxes, scores, labels = self._single_scene_multiclass_nms(
bboxes, scores, input_meta)
bboxes = input_meta['box_type_3d'](
bboxes,
box_dim=bboxes.shape[1],
with_yaw=bboxes.shape[1] == 7,
origin=(.5, .5, .5))
results = InstanceData()
results.bboxes_3d = bboxes
results.scores_3d = scores
results.labels_3d = labels
return results
def predict_by_feat(self, center_preds: List[List[Tensor]],
bbox_preds: List[List[Tensor]], cls_preds,
points: List[List[Tensor]],
batch_input_metas: List[dict],
**kwargs) -> List[InstanceData]:
"""Generate boxes for all scenes.
Args:
center_preds (list[list[Tensor]]): Centerness predictions for
all scenes.
bbox_preds (list[list[Tensor]]): Bbox predictions for all scenes.
cls_preds (list[list[Tensor]]): Classification predictions for all
scenes.
points (list[list[Tensor]]): Final location coordinates for all
scenes.
batch_input_metas (list[dict]): Meta infos for all scenes.
Returns:
list[InstanceData]: Predicted bboxes, scores, and labels for
all scenes.
"""
results = []
for i in range(len(batch_input_metas)):
result = self._predict_by_feat_single(
center_preds=[x[i] for x in center_preds],
bbox_preds=[x[i] for x in bbox_preds],
cls_preds=[x[i] for x in cls_preds],
points=[x[i] for x in points],
input_meta=batch_input_metas[i])
results.append(result)
return results
@staticmethod
def _bbox_to_loss(bbox: Tensor) -> Tensor:
"""Transform box to the axis-aligned or rotated iou loss format.
Args:
bbox (Tensor): 3D box of shape (N, 6) or (N, 7).
Returns:
Tensor: Transformed 3D box of shape (N, 6) or (N, 7).
"""
# rotated iou loss accepts (x, y, z, w, h, l, heading)
if bbox.shape[-1] != 6:
return bbox
# axis-aligned case: x, y, z, w, h, l -> x1, y1, z1, x2, y2, z2
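# e.g. an axis-aligned box (0, 0, 0, 1, 1, 1) becomes the corner form
# (-.5, -.5, -.5, .5, .5, .5); 7-dim rotated boxes were already returned
# unchanged above, since the rotated IoU loss consumes the center format.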
return torch.stack(
(bbox[..., 0] - bbox[..., 3] / 2, bbox[..., 1] - bbox[..., 4] / 2,
bbox[..., 2] - bbox[..., 5] / 2, bbox[..., 0] + bbox[..., 3] / 2,
bbox[..., 1] + bbox[..., 4] / 2, bbox[..., 2] + bbox[..., 5] / 2),
dim=-1)
@staticmethod
def _bbox_pred_to_bbox(points: Tensor, bbox_pred: Tensor) -> Tensor:
"""Transform predicted bbox parameters to bbox.
Args:
points (Tensor): Final locations of shape (N, 3)
bbox_pred (Tensor): Predicted bbox parameters of shape (N, 6)
or (N, 8).
Returns:
Tensor: Transformed 3D box of shape (N, 6) or (N, 7).
"""
if bbox_pred.shape[0] == 0:
return bbox_pred
x_center = points[:, 0] + (bbox_pred[:, 1] - bbox_pred[:, 0]) / 2
y_center = points[:, 1] + (bbox_pred[:, 3] - bbox_pred[:, 2]) / 2
z_center = points[:, 2] + (bbox_pred[:, 5] - bbox_pred[:, 4]) / 2
# dx_min, dx_max, dy_min, dy_max, dz_min, dz_max -> x, y, z, w, l, h
base_bbox = torch.stack([
x_center,
y_center,
z_center,
bbox_pred[:, 0] + bbox_pred[:, 1],
bbox_pred[:, 2] + bbox_pred[:, 3],
bbox_pred[:, 4] + bbox_pred[:, 5],
], -1)
# axis-aligned case
if bbox_pred.shape[1] == 6:
return base_bbox
# rotated case: ..., sin(2a)ln(q), cos(2a)ln(q)
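# The head predicts p6 = sin(2a) * ln(q) and p7 = cos(2a) * ln(q), so
# sqrt(p6^2 + p7^2) recovers |ln(q)| (hence q >= 1 after exp) and
# 0.5 * atan2(p6, p7) recovers the heading a; with scale = w + l, the two
# horizontal sizes follow as scale / (1 + q) and scale * q / (1 + q).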
scale = bbox_pred[:, 0] + bbox_pred[:, 1] + \
bbox_pred[:, 2] + bbox_pred[:, 3]
q = torch.exp(
torch.sqrt(
torch.pow(bbox_pred[:, 6], 2) + torch.pow(bbox_pred[:, 7], 2)))
alpha = 0.5 * torch.atan2(bbox_pred[:, 6], bbox_pred[:, 7])
return torch.stack(
(x_center, y_center, z_center, scale / (1 + q), scale /
(1 + q) * q, bbox_pred[:, 5] + bbox_pred[:, 4], alpha),
dim=-1)
@staticmethod
def _get_face_distances(points: Tensor, boxes: Tensor) -> Tensor:
"""Calculate distances from point to box faces.
Args:
points (Tensor): Final locations of shape (N_points, N_boxes, 3).
boxes (Tensor): 3D boxes of shape (N_points, N_boxes, 7)
Returns:
Tensor: Face distances of shape (N_points, N_boxes, 6),
(dx_min, dx_max, dy_min, dy_max, dz_min, dz_max).
"""
shift = torch.stack(
(points[..., 0] - boxes[..., 0], points[..., 1] - boxes[..., 1],
points[..., 2] - boxes[..., 2]),
dim=-1).permute(1, 0, 2)
shift = rotation_3d_in_axis(
shift, -boxes[0, :, 6], axis=2).permute(1, 0, 2)
centers = boxes[..., :3] + shift
dx_min = centers[..., 0] - boxes[..., 0] + boxes[..., 3] / 2
dx_max = boxes[..., 0] + boxes[..., 3] / 2 - centers[..., 0]
dy_min = centers[..., 1] - boxes[..., 1] + boxes[..., 4] / 2
dy_max = boxes[..., 1] + boxes[..., 4] / 2 - centers[..., 1]
dz_min = centers[..., 2] - boxes[..., 2] + boxes[..., 5] / 2
dz_max = boxes[..., 2] + boxes[..., 5] / 2 - centers[..., 2]
return torch.stack((dx_min, dx_max, dy_min, dy_max, dz_min, dz_max),
dim=-1)
@staticmethod
def _get_centerness(face_distances: Tensor) -> Tensor:
"""Compute point centerness w.r.t containing box.
Args:
face_distances (Tensor): Face distances of shape (B, N, 6),
(dx_min, dx_max, dy_min, dy_max, dz_min, dz_max).
Returns:
Tensor: Centerness of shape (B, N).
"""
x_dims = face_distances[..., [0, 1]]
y_dims = face_distances[..., [2, 3]]
z_dims = face_distances[..., [4, 5]]
centerness_targets = x_dims.min(dim=-1)[0] / x_dims.max(dim=-1)[0] * \
y_dims.min(dim=-1)[0] / y_dims.max(dim=-1)[0] * \
z_dims.min(dim=-1)[0] / z_dims.max(dim=-1)[0]
return torch.sqrt(centerness_targets)
@torch.no_grad()
def get_targets(self, points: Tensor, gt_bboxes: BaseInstance3DBoxes,
gt_labels: Tensor) -> Tuple[Tensor, ...]:
"""Compute targets for final locations for a single scene.
Args:
points (list[Tensor]): Final locations for all levels.
gt_bboxes (BaseInstance3DBoxes): Ground truth boxes.
gt_labels (Tensor): Ground truth labels.
Returns:
tuple[Tensor, ...]: Centerness, bbox and classification
targets for all locations.
"""
float_max = points[0].new_tensor(1e8)
n_levels = len(points)
levels = torch.cat([
points[i].new_tensor(i).expand(len(points[i]))
for i in range(len(points))
])
points = torch.cat(points)
gt_bboxes = gt_bboxes.to(points.device)
n_points = len(points)
n_boxes = len(gt_bboxes)
volumes = gt_bboxes.volume.unsqueeze(0).expand(n_points, n_boxes)
# condition 1: point inside box
boxes = torch.cat((gt_bboxes.gravity_center, gt_bboxes.tensor[:, 3:]),
dim=1)
boxes = boxes.expand(n_points, n_boxes, 7)
points = points.unsqueeze(1).expand(n_points, n_boxes, 3)
face_distances = self._get_face_distances(points, boxes)
inside_box_condition = face_distances.min(dim=-1).values > 0
# condition 2: positive points per level >= limit
# calculate positive points per scale
n_pos_points_per_level = []
for i in range(n_levels):
n_pos_points_per_level.append(
torch.sum(inside_box_condition[levels == i], dim=0))
# find best level
n_pos_points_per_level = torch.stack(n_pos_points_per_level, dim=0)
lower_limit_mask = n_pos_points_per_level < self.pts_assign_threshold
lower_index = torch.argmax(lower_limit_mask.int(), dim=0) - 1
lower_index = torch.where(lower_index < 0, 0, lower_index)
all_upper_limit_mask = torch.all(
torch.logical_not(lower_limit_mask), dim=0)
best_level = torch.where(all_upper_limit_mask, n_levels - 1,
lower_index)
# keep only points with best level
best_level = best_level.expand(n_points, n_boxes)
levels = torch.unsqueeze(levels, 1).expand(n_points, n_boxes)
level_condition = best_level == levels
# condition 3: limit topk points per box by centerness
centerness = self._get_centerness(face_distances)
centerness = torch.where(inside_box_condition, centerness,
torch.ones_like(centerness) * -1)
centerness = torch.where(level_condition, centerness,
torch.ones_like(centerness) * -1)
top_centerness = torch.topk(
centerness,
min(self.pts_center_threshold + 1, len(centerness)),
dim=0).values[-1]
topk_condition = centerness > top_centerness.unsqueeze(0)
# condition 4: min volume box per point
volumes = torch.where(inside_box_condition, volumes, float_max)
volumes = torch.where(level_condition, volumes, float_max)
volumes = torch.where(topk_condition, volumes, float_max)
min_volumes, min_inds = volumes.min(dim=1)
center_targets = centerness[torch.arange(n_points), min_inds]
bbox_targets = boxes[torch.arange(n_points), min_inds]
if not gt_bboxes.with_yaw:
bbox_targets = bbox_targets[:, :-1]
cls_targets = gt_labels[min_inds]
cls_targets = torch.where(min_volumes == float_max, -1, cls_targets)
return center_targets, bbox_targets, cls_targets
def _single_scene_multiclass_nms(self, bboxes: Tensor, scores: Tensor,
input_meta: dict) -> Tuple[Tensor, ...]:
"""Multi-class nms for a single scene.
Args:
bboxes (Tensor): Predicted boxes of shape (N_boxes, 6) or
(N_boxes, 7).
scores (Tensor): Predicted scores of shape (N_boxes, N_classes).
input_meta (dict): Scene meta data.
Returns:
tuple[Tensor, ...]: Predicted bboxes, scores and labels.
"""
num_classes = scores.shape[1]
with_yaw = bboxes.shape[1] == 7
nms_bboxes, nms_scores, nms_labels = [], [], []
for i in range(num_classes):
ids = scores[:, i] > self.test_cfg.score_thr
if not ids.any():
continue
class_scores = scores[ids, i]
class_bboxes = bboxes[ids]
if with_yaw:
nms_function = nms3d
else:
class_bboxes = torch.cat(
(class_bboxes, torch.zeros_like(class_bboxes[:, :1])),
dim=1)
nms_function = nms3d_normal
nms_ids = nms_function(class_bboxes, class_scores,
self.test_cfg.iou_thr)
nms_bboxes.append(class_bboxes[nms_ids])
nms_scores.append(class_scores[nms_ids])
nms_labels.append(
bboxes.new_full(
class_scores[nms_ids].shape, i, dtype=torch.long))
if len(nms_bboxes):
nms_bboxes = torch.cat(nms_bboxes, dim=0)
nms_scores = torch.cat(nms_scores, dim=0)
nms_labels = torch.cat(nms_labels, dim=0)
else:
nms_bboxes = bboxes.new_zeros((0, bboxes.shape[1]))
nms_scores = bboxes.new_zeros((0, ))
nms_labels = bboxes.new_zeros((0, ))
if not with_yaw:
nms_bboxes = nms_bboxes[:, :6]
return nms_bboxes, nms_scores, nms_labels
......@@ -8,6 +8,7 @@ from .groupfree3dnet import GroupFree3DNet
from .h3dnet import H3DNet
from .imvotenet import ImVoteNet
from .imvoxelnet import ImVoxelNet
from .mink_single_stage import MinkSingleStage3DDetector
from .multiview_dfm import MultiViewDfM
from .mvx_faster_rcnn import DynamicMVXFasterRCNN, MVXFasterRCNN
from .mvx_two_stage import MVXTwoStageDetector
......@@ -21,25 +22,9 @@ from .votenet import VoteNet
from .voxelnet import VoxelNet
__all__ = [
'Base3DDetector',
'DfM',
'VoxelNet',
'DynamicVoxelNet',
'MVXTwoStageDetector',
'DynamicMVXFasterRCNN',
'MVXFasterRCNN',
'MultiViewDfM',
'PartA2',
'VoteNet',
'H3DNet',
'CenterPoint',
'SSD3DNet',
'ImVoteNet',
'SingleStageMono3DDetector',
'FCOSMono3D',
'ImVoxelNet',
'GroupFree3DNet',
'PointRCNN',
'SMOKEMono3D',
'SASSD',
'Base3DDetector', 'VoxelNet', 'DynamicVoxelNet', 'MVXTwoStageDetector',
'DynamicMVXFasterRCNN', 'MVXFasterRCNN', 'PartA2', 'VoteNet', 'H3DNet',
'CenterPoint', 'SSD3DNet', 'ImVoteNet', 'SingleStageMono3DDetector',
'FCOSMono3D', 'ImVoxelNet', 'GroupFree3DNet', 'PointRCNN', 'SMOKEMono3D',
'SASSD', 'MinkSingleStage3DDetector', 'MultiViewDfM', 'DfM'
]
# Copyright (c) OpenMMLab. All rights reserved.
# Adapted from https://github.com/SamsungLabs/fcaf3d/blob/master/mmdet3d/models/detectors/single_stage_sparse.py # noqa
from typing import Dict, Tuple, Union
import torch
from torch import Tensor
try:
import MinkowskiEngine as ME
except ImportError:
# Please follow getting_started.md to install MinkowskiEngine.
pass
from mmdet3d.registry import MODELS
from mmdet3d.utils import ConfigType, OptConfigType, OptMultiConfig
from .single_stage import SingleStage3DDetector
@MODELS.register_module()
class MinkSingleStage3DDetector(SingleStage3DDetector):
r"""MinkSingleStage3DDetector.
This class serves as a base class for single-stage 3D detectors based on
MinkowskiEngine `GSDN <https://arxiv.org/abs/2006.12356>`_.
Args:
backbone (dict): Config dict of detector's backbone.
neck (dict, optional): Config dict of neck. Defaults to None.
bbox_head (dict, optional): Config dict of box head. Defaults to None.
train_cfg (dict, optional): Config dict of training hyper-parameters.
Defaults to None.
test_cfg (dict, optional): Config dict of test hyper-parameters.
Defaults to None.
data_preprocessor (dict or ConfigDict, optional): The pre-process
config of :class:`BaseDataPreprocessor`. It usually includes
``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``.
init_cfg (dict or ConfigDict, optional): the config to control the
initialization. Defaults to None.
"""
def __init__(self,
backbone: ConfigType,
neck: OptConfigType = None,
bbox_head: OptConfigType = None,
train_cfg: OptConfigType = None,
test_cfg: OptConfigType = None,
data_preprocessor: OptConfigType = None,
init_cfg: OptMultiConfig = None) -> None:
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
data_preprocessor=data_preprocessor,
init_cfg=init_cfg)
self.voxel_size = bbox_head['voxel_size']
# # TODO: unify the keys
# self.head = self.bbox_head
def extract_feat(
self, batch_inputs_dict: Dict[str, Tensor]
) -> Union[Tuple[torch.Tensor], Dict[str, Tensor]]:
"""Directly extract features from the backbone+neck.
Args:
batch_inputs_dict (dict): The model input dict which includes
'points' keys.
- points (list[torch.Tensor]): Point cloud of each sample.
Returns:
tuple[Tensor] | dict: For outdoor 3D object detection, we
typically obtain a tuple of features from the backbone + neck;
for indoor 3D object detection, a dict containing features is
usually obtained.
"""
points = batch_inputs_dict['points']
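# Divide xyz by voxel_size so that batch_sparse_collate can quantize the
# points into batch-indexed integer voxel coordinates; the remaining channels
# (e.g. colors) are kept as per-voxel features.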
coordinates, features = ME.utils.batch_sparse_collate(
[(p[:, :3] / self.voxel_size, p[:, 3:]) for p in points],
device=points[0].device)
x = ME.SparseTensor(coordinates=coordinates, features=features)
x = self.backbone(x)
if self.with_neck:
x = self.neck(x)
return x
......@@ -143,7 +143,11 @@ class SingleStage3DDetector(Base3DDetector):
"""Directly extract features from the backbone+neck.
Args:
points (torch.Tensor): Input points.
batch_inputs_dict (dict): The model input dict which include
'points', 'img' keys.
- points (list[torch.Tensor]): Point cloud of each sample.
- imgs (torch.Tensor, optional): Image of each sample.
Returns:
tuple[Tensor] | dict: For outdoor 3D object detection, we
......
......@@ -4,11 +4,12 @@ from .axis_aligned_iou_loss import AxisAlignedIoULoss, axis_aligned_iou_loss
from .chamfer_distance import ChamferDistance, chamfer_distance
from .multibin_loss import MultiBinLoss
from .paconv_regularization_loss import PAConvRegularizationLoss
from .rotated_iou_loss import RotatedIoU3DLoss, rotated_iou_3d_loss
from .uncertain_smooth_l1_loss import UncertainL1Loss, UncertainSmoothL1Loss
__all__ = [
'FocalLoss', 'SmoothL1Loss', 'binary_cross_entropy', 'ChamferDistance',
'chamfer_distance', 'axis_aligned_iou_loss', 'AxisAlignedIoULoss',
'PAConvRegularizationLoss', 'UncertainL1Loss', 'UncertainSmoothL1Loss',
'MultiBinLoss'
'MultiBinLoss', 'RotatedIoU3DLoss', 'rotated_iou_3d_loss'
]
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional
import torch
from mmcv.ops import diff_iou_rotated_3d
from torch import Tensor
from torch import nn as nn
from mmdet3d.registry import MODELS
from mmdet.models.losses.utils import weighted_loss
@weighted_loss
def rotated_iou_3d_loss(pred: Tensor, target: Tensor) -> Tensor:
"""Calculate the IoU loss (1-IoU) of two sets of rotated bounding boxes.
Note that predictions and targets are in one-to-one correspondence.
Args:
pred (torch.Tensor): Bbox predictions with shape [N, 7]
(x, y, z, w, l, h, alpha).
target (torch.Tensor): Bbox targets (gt) with shape [N, 7]
(x, y, z, w, l, h, alpha).
Returns:
torch.Tensor: IoU loss between predictions and targets.
"""
iou_loss = 1 - diff_iou_rotated_3d(pred.unsqueeze(0),
target.unsqueeze(0))[0]
return iou_loss
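# Note: mmcv's diff_iou_rotated_3d currently ships a CUDA implementation
# only, so this loss requires a GPU in practice (its unit test skips on CPU).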
@MODELS.register_module()
class RotatedIoU3DLoss(nn.Module):
"""Calculate the IoU loss (1-IoU) of rotated bounding boxes.
Args:
reduction (str): Method to reduce losses. The valid reduction
methods are 'none', 'sum' and 'mean'. Defaults to 'mean'.
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
"""
def __init__(self,
reduction: str = 'mean',
loss_weight: float = 1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred: Tensor,
target: Tensor,
weight: Optional[Tensor] = None,
avg_factor: Optional[int] = None,
reduction_override: Optional[str] = None,
**kwargs) -> Tensor:
"""Forward function of loss calculation.
Args:
pred (torch.Tensor): Bbox predictions with shape [..., 7]
(x, y, z, w, l, h, alpha).
target (torch.Tensor): Bbox targets (gt) with shape [..., 7]
(x, y, z, w, l, h, alpha).
weight (torch.Tensor | float, optional): Weight of loss.
Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): Method to reduce losses.
The valid reduction methods are 'none', 'sum' and 'mean'.
Defaults to None.
Returns:
torch.Tensor: IoU loss between predictions and targets.
"""
if weight is not None and not torch.any(weight > 0):
return pred.sum() * weight.sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
weight = weight.mean(-1)
loss = self.loss_weight * rotated_iou_3d_loss(
pred,
target,
weight,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
......@@ -9,10 +9,19 @@ from matplotlib.collections import PatchCollection
from matplotlib.patches import PathPatch
from matplotlib.path import Path
from mmengine.dist import master_only
from mmengine.structures import InstanceData
from mmengine.visualization.utils import check_type, tensor2ndarray
from torch import Tensor
from mmdet3d.registry import VISUALIZERS
from mmdet3d.structures import (BaseInstance3DBoxes, CameraInstance3DBoxes,
Coord3DMode, DepthInstance3DBoxes,
Det3DDataSample, LiDARInstance3DBoxes,
PointData, points_cam2img)
from mmdet3d.structures.bbox_3d.box_3d_mode import Box3DMode
from mmdet.visualization import DetLocalVisualizer
from .vis_utils import (proj_camera_bbox3d_to_img, proj_depth_bbox3d_to_img,
proj_lidar_bbox3d_to_img, to_depth_mode)
try:
import open3d as o3d
......@@ -21,17 +30,6 @@ except ImportError:
raise ImportError(
'Please run "pip install open3d" to install open3d first.')
from mmengine.structures import InstanceData
from mmengine.visualization.utils import check_type, tensor2ndarray
from mmdet3d.registry import VISUALIZERS
from mmdet3d.structures import (BaseInstance3DBoxes, CameraInstance3DBoxes,
Coord3DMode, DepthInstance3DBoxes,
Det3DDataSample, LiDARInstance3DBoxes,
PointData, points_cam2img)
from .vis_utils import (proj_camera_bbox3d_to_img, proj_depth_bbox3d_to_img,
proj_lidar_bbox3d_to_img, to_depth_mode)
@VISUALIZERS.register_module()
class Det3DLocalVisualizer(DetLocalVisualizer):
......
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import pytest
import torch
from mmdet3d import * # noqa
from mmdet3d.models.dense_heads import FCAF3DHead
from tests.utils.model_utils import _create_detector_inputs
class TestFCAF3DHead(TestCase):
def test_fcaf3d_head_loss(self):
"""Test FCAF3D head loss on a fake sparse backbone input."""
if not torch.cuda.is_available():
pytest.skip('test requires GPU and torch+cuda')
try:
import MinkowskiEngine as ME
except ImportError:
pytest.skip('test requires MinkowskiEngine installation')
# build head
fcaf3d_head = FCAF3DHead(
in_channels=(64, 128, 256, 512),
out_channels=128,
voxel_size=1.,
pts_prune_threshold=1000,
pts_assign_threshold=27,
pts_center_threshold=18,
num_classes=18,
num_reg_outs=6,
test_cfg=dict(nms_pre=1000, iou_thr=.5, score_thr=.01),
center_loss=dict(type='mmdet.CrossEntropyLoss', use_sigmoid=True),
bbox_loss=dict(type='AxisAlignedIoULoss'),
cls_loss=dict(type='mmdet.FocalLoss'),
)
fcaf3d_head = fcaf3d_head.cuda()
# fake input of head
coordinates, features = [torch.randn(500, 3).cuda() * 100
], [torch.randn(500, 3).cuda()]
tensor_coordinates, tensor_features = ME.utils.sparse_collate(
coordinates, features)
x = ME.SparseTensor(
features=tensor_features, coordinates=tensor_coordinates)
# backbone
conv1 = ME.MinkowskiConvolution(
3, 64, kernel_size=3, stride=2, dimension=3).cuda()
conv2 = ME.MinkowskiConvolution(
64, 128, kernel_size=3, stride=2, dimension=3).cuda()
conv3 = ME.MinkowskiConvolution(
128, 256, kernel_size=3, stride=2, dimension=3).cuda()
conv4 = ME.MinkowskiConvolution(
256, 512, kernel_size=3, stride=2, dimension=3).cuda()
# backbone outputs of 4 levels
x1 = conv1(x)
x2 = conv2(x1)
x3 = conv3(x2)
x4 = conv4(x3)
x = (x1, x2, x3, x4)
# fake annotation
packed_inputs = _create_detector_inputs(
with_points=False,
with_img=False,
num_gt_instance=3,
num_classes=1,
points_feat_dim=6,
gt_bboxes_dim=6)
data_samples = [
sample.cuda() for sample in packed_inputs['data_samples']
]
gt_losses = fcaf3d_head.loss(x, data_samples)
print(gt_losses)
self.assertGreaterEqual(gt_losses['cls_loss'], 0,
'cls loss should be non-negative')
self.assertGreaterEqual(gt_losses['bbox_loss'], 0,
'bbox loss should be non-negative')
self.assertGreaterEqual(gt_losses['center_loss'], 0,
'center loss should be non-negative')
import unittest
import torch
from mmengine import DefaultScope
from mmdet3d.registry import MODELS
from tests.utils.model_utils import (_create_detector_inputs,
_get_detector_cfg, _setup_seed)
class TestFCAF3d(unittest.TestCase):
def test_fcaf3d(self):
try:
import MinkowskiEngine # noqa: F401
except ImportError:
return
import mmdet3d.models
assert hasattr(mmdet3d.models, 'MinkSingleStage3DDetector')
DefaultScope.get_instance('test_fcaf3d', scope_name='mmdet3d')
_setup_seed(0)
fcaf3d_net_cfg = _get_detector_cfg(
'fcaf3d/fcaf3d_8xb2_scannet-3d-18class.py')
model = MODELS.build(fcaf3d_net_cfg)
num_gt_instance = 3
packed_inputs = _create_detector_inputs(
num_gt_instance=num_gt_instance,
num_classes=1,
points_feat_dim=6,
gt_bboxes_dim=6)
if torch.cuda.is_available():
model = model.cuda()
with torch.no_grad():
data = model.data_preprocessor(packed_inputs, False)
torch.cuda.empty_cache()
results = model.forward(**data, mode='predict')
self.assertEqual(len(results), 1)
self.assertIn('bboxes_3d', results[0].pred_instances_3d)
self.assertIn('scores_3d', results[0].pred_instances_3d)
self.assertIn('labels_3d', results[0].pred_instances_3d)
losses = model.forward(**data, mode='loss')
self.assertGreater(losses['center_loss'], 0)
self.assertGreater(losses['bbox_loss'], 0)
self.assertGreater(losses['cls_loss'], 0)
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet3d.models.losses import RotatedIoU3DLoss
def test_rotated_iou_3d_loss():
if not torch.cuda.is_available():
return
boxes1 = torch.tensor([[.5, .5, .5, 1., 1., 1., .0],
[.5, .5, .5, 1., 1., 1., .0],
[.5, .5, .5, 1., 1., 1., .0],
[.5, .5, .5, 1., 1., 1., .0],
[.5, .5, .5, 1., 1., 1., .0]]).cuda()
boxes2 = torch.tensor([[.5, .5, .5, 1., 1., 1., .0],
[.5, .5, .5, 1., 1., 2., np.pi / 2],
[.5, .5, .5, 1., 1., 1., np.pi / 4],
[1., 1., 1., 1., 1., 1., .0],
[-1.5, -1.5, -1.5, 2.5, 2.5, 2.5, .0]]).cuda()
expected_losses = 1 - torch.tensor([[1., .5, .7071, 1 / 15, .0]]).cuda()
losses = RotatedIoU3DLoss(reduction='none')(boxes1, boxes2)
assert torch.allclose(losses, expected_losses, atol=1e-4)