Commit ba492be7 authored by zhangwenwei

Use MMDet API and pass CI

parent 9466dff7
import torch
from torch import nn
from mmdet3d.ops import DynamicScatter, build_norm_layer
from mmdet3d.ops import DynamicScatter
from mmdet.ops import build_norm_layer
from ..registry import VOXEL_ENCODERS
from .utils import PFNLayer, get_paddings_indicator
......
......@@ -2,7 +2,7 @@ import torch
from torch import nn
from torch.nn import functional as F
from ..utils import build_norm_layer
from mmdet.ops import build_norm_layer
class Empty(nn.Module):
......
......@@ -3,9 +3,9 @@ from torch import nn
from torch.nn import functional as F
from mmdet3d.ops import DynamicScatter
from mmdet.ops import build_norm_layer
from .. import builder
from ..registry import VOXEL_ENCODERS
from ..utils import build_norm_layer
from .utils import Empty, VFELayer, get_paddings_indicator
......
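# --- Editor's sketch (not part of this commit) ---------------------------
# The voxel encoders above now pull build_norm_layer from mmdet.ops. A
# minimal usage sketch, assuming the usual MMDet/MMCV convention that
# build_norm_layer(cfg, num_features) returns a (layer_name, layer_module)
# tuple, and that 'BN1d' has been registered into the shared norm_cfg
# (as the norm.py shown below does); config values are illustrative only.
from mmdet.ops import build_norm_layer

norm_name, norm_layer = build_norm_layer(
    dict(type='BN1d', eps=1e-3, momentum=0.01), 64)
print(norm_name, norm_layer)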
from mmdet.ops import (RoIAlign, SigmoidFocalLoss, build_norm_layer,
get_compiler_version, get_compiling_cuda_version, nms,
roi_align, sigmoid_focal_loss)
from mmdet.ops import (RoIAlign, SigmoidFocalLoss, get_compiler_version,
get_compiling_cuda_version, nms, roi_align,
sigmoid_focal_loss)
from .norm import NaiveSyncBatchNorm1d, NaiveSyncBatchNorm2d
from .voxel import DynamicScatter, Voxelization, dynamic_scatter, voxelization
__all__ = [
'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'get_compiler_version',
'get_compiling_cuda_version', 'build_conv_layer', 'build_norm_layer',
'batched_nms', 'Voxelization', 'voxelization', 'dynamic_scatter',
'DynamicScatter', 'sigmoid_focal_loss', 'SigmoidFocalLoss'
'get_compiling_cuda_version', 'build_conv_layer', 'NaiveSyncBatchNorm1d',
'NaiveSyncBatchNorm2d', 'batched_nms', 'Voxelization', 'voxelization',
'dynamic_scatter', 'DynamicScatter', 'sigmoid_focal_loss',
'SigmoidFocalLoss'
]
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.autograd.function import Function
from mmdet.ops.norm import norm_cfg
from .sync_bn import NaiveSyncBatchNorm1d, NaiveSyncBatchNorm2d
class AllReduce(Function):
@staticmethod
def forward(ctx, input):
input_list = [
torch.zeros_like(input) for k in range(dist.get_world_size())
]
# Use allgather instead of allreduce because in-place operations are unreliable
dist.all_gather(input_list, input, async_op=False)
inputs = torch.stack(input_list, dim=0)
return torch.sum(inputs, dim=0)
@staticmethod
def backward(ctx, grad_output):
dist.all_reduce(grad_output, async_op=False)
return grad_output
class NaiveSyncBatchNorm1d(nn.BatchNorm1d):
"""Syncronized Batch Normalization for 3D Tensors
Note:
This implementation is modified from
https://github.com/facebookresearch/detectron2/
`torch.nn.SyncBatchNorm` has known unknown bugs.
It produces significantly worse AP (and sometimes goes NaN)
when the batch size on each worker is quite different
(e.g., when scale augmentation is used).
In 3D detection, different workers has points of different shapes,
whish also cause instability.
Use this implementation before `nn.SyncBatchNorm` is fixed.
It is slower than `nn.SyncBatchNorm`.
"""
def forward(self, input):
if dist.get_world_size() == 1 or not self.training:
return super().forward(input)
assert input.shape[0] > 0, 'SyncBN does not support empty inputs'
C = input.shape[1]
mean = torch.mean(input, dim=[0, 2])
meansqr = torch.mean(input * input, dim=[0, 2])
vec = torch.cat([mean, meansqr], dim=0)
vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
mean, meansqr = torch.split(vec, C)
var = meansqr - mean * mean
self.running_mean += self.momentum * (
mean.detach() - self.running_mean)
self.running_var += self.momentum * (var.detach() - self.running_var)
invstd = torch.rsqrt(var + self.eps)
scale = self.weight * invstd
bias = self.bias - mean * scale
scale = scale.reshape(1, -1, 1)
bias = bias.reshape(1, -1, 1)
return input * scale + bias
class NaiveSyncBatchNorm2d(nn.BatchNorm2d):
"""Syncronized Batch Normalization for 4D Tensors
Note:
This implementation is modified from
https://github.com/facebookresearch/detectron2/
`torch.nn.SyncBatchNorm` has known unknown bugs.
It produces significantly worse AP (and sometimes goes NaN)
when the batch size on each worker is quite different
(e.g., when scale augmentation is used).
This phenomenon also occurs when the feature fusion modules
of multi-modality detectors use SyncBN.
Use this implementation before `nn.SyncBatchNorm` is fixed.
It is slower than `nn.SyncBatchNorm`.
"""
def forward(self, input):
if dist.get_world_size() == 1 or not self.training:
return super().forward(input)
assert input.shape[0] > 0, 'SyncBN does not support empty inputs'
C = input.shape[1]
mean = torch.mean(input, dim=[0, 2, 3])
meansqr = torch.mean(input * input, dim=[0, 2, 3])
vec = torch.cat([mean, meansqr], dim=0)
vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
mean, meansqr = torch.split(vec, C)
var = meansqr - mean * mean
self.running_mean += self.momentum * (
mean.detach() - self.running_mean)
self.running_var += self.momentum * (var.detach() - self.running_var)
invstd = torch.rsqrt(var + self.eps)
scale = self.weight * invstd
bias = self.bias - mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return input * scale + bias
norm_cfg.update({
'BN1d': ('bn', nn.BatchNorm1d),
......
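# --- Editor's sketch (not part of this commit) ---------------------------
# Standalone check of the statistics used by NaiveSyncBatchNorm above:
# with equal per-worker batch sizes, averaging per-worker channel means and
# mean-squares and taking var = E[x^2] - E[x]^2 reproduces the global batch
# statistics that the AllReduce of `vec = [mean, meansqr]` computes.
import torch

torch.manual_seed(0)
worker_batches = [torch.randn(4, 3, 8) for _ in range(2)]  # two fake workers, (N, C, L)

mean = torch.stack([x.mean(dim=[0, 2]) for x in worker_batches]).mean(dim=0)
meansqr = torch.stack([(x * x).mean(dim=[0, 2]) for x in worker_batches]).mean(dim=0)
var = meansqr - mean * mean

# Reference: per-channel statistics over the concatenated global batch.
flat = torch.cat(worker_batches, dim=0).transpose(0, 1).reshape(3, -1)
assert torch.allclose(mean, flat.mean(dim=1), atol=1e-5)
assert torch.allclose(var, flat.var(dim=1, unbiased=False), atol=1e-5)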
import torch
import torch.distributed as dist
import torch.nn as nn
from torch.autograd.function import Function
class AllReduce(Function):
@staticmethod
def forward(ctx, input):
input_list = [
torch.zeros_like(input) for k in range(dist.get_world_size())
]
# Use allgather instead of allreduce because in-place operations are unreliable
dist.all_gather(input_list, input, async_op=False)
inputs = torch.stack(input_list, dim=0)
return torch.sum(inputs, dim=0)
@staticmethod
def backward(ctx, grad_output):
dist.all_reduce(grad_output, async_op=False)
return grad_output
class NaiveSyncBatchNorm1d(nn.BatchNorm1d):
"""Syncronized Batch Normalization for 3D Tensors
Note:
This implementation is modified from
https://github.com/facebookresearch/detectron2/
`torch.nn.SyncBatchNorm` has known unknown bugs.
It produces significantly worse AP (and sometimes goes NaN)
when the batch size on each worker is quite different
(e.g., when scale augmentation is used).
In 3D detection, different workers has points of different shapes,
whish also cause instability.
Use this implementation before `nn.SyncBatchNorm` is fixed.
It is slower than `nn.SyncBatchNorm`.
"""
def forward(self, input):
if dist.get_world_size() == 1 or not self.training:
return super().forward(input)
assert input.shape[0] > 0, 'SyncBN does not support empty inputs'
C = input.shape[1]
mean = torch.mean(input, dim=[0, 2])
meansqr = torch.mean(input * input, dim=[0, 2])
vec = torch.cat([mean, meansqr], dim=0)
vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
mean, meansqr = torch.split(vec, C)
var = meansqr - mean * mean
self.running_mean += self.momentum * (
mean.detach() - self.running_mean)
self.running_var += self.momentum * (var.detach() - self.running_var)
invstd = torch.rsqrt(var + self.eps)
scale = self.weight * invstd
bias = self.bias - mean * scale
scale = scale.reshape(1, -1, 1)
bias = bias.reshape(1, -1, 1)
return input * scale + bias
class NaiveSyncBatchNorm2d(nn.BatchNorm2d):
"""Syncronized Batch Normalization for 4D Tensors
Note:
This implementation is modified from
https://github.com/facebookresearch/detectron2/
`torch.nn.SyncBatchNorm` has known unknown bugs.
It produces significantly worse AP (and sometimes goes NaN)
when the batch size on each worker is quite different
(e.g., when scale augmentation is used).
This phenomenon also occurs when the feature fusion modules
of multi-modality detectors use SyncBN.
Use this implementation before `nn.SyncBatchNorm` is fixed.
It is slower than `nn.SyncBatchNorm`.
"""
def forward(self, input):
if dist.get_world_size() == 1 or not self.training:
return super().forward(input)
assert input.shape[0] > 0, 'SyncBN does not support empty inputs'
C = input.shape[1]
mean = torch.mean(input, dim=[0, 2, 3])
meansqr = torch.mean(input * input, dim=[0, 2, 3])
vec = torch.cat([mean, meansqr], dim=0)
vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())
mean, meansqr = torch.split(vec, C)
var = meansqr - mean * mean
self.running_mean += self.momentum * (
mean.detach() - self.running_mean)
self.running_var += self.momentum * (var.detach() - self.running_var)
invstd = torch.rsqrt(var + self.eps)
scale = self.weight * invstd
bias = self.bias - mean * scale
scale = scale.reshape(1, -1, 1, 1)
bias = bias.reshape(1, -1, 1, 1)
return input * scale + bias
......@@ -99,7 +99,7 @@ def make_cuda_ext(name,
if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
define_macros += [('WITH_CUDA', None)]
extension = CUDAExtension
extra_compile_args['nvcc'] = [
extra_compile_args['nvcc'] = extra_args + [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
......@@ -248,13 +248,6 @@ if __name__ == '__main__':
'src/iou3d.cpp',
'src/iou3d_kernel.cu',
]),
make_cuda_ext(
name='sigmoid_focal_loss_cuda',
module='mmdet3d.ops.sigmoid_focal_loss',
sources=[
'src/sigmoid_focal_loss.cpp',
'src/sigmoid_focal_loss_cuda.cu'
]),
make_cuda_ext(
name='voxel_layer',
module='mmdet3d.ops.voxel',
......
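# --- Editor's sketch (not part of this commit) ---------------------------
# Why the nvcc change above matters: torch.utils.cpp_extension.CUDAExtension
# takes per-compiler flag lists, so flags intended for both host and device
# code must appear under both 'cxx' and 'nvcc'. Extension name, sources and
# flags below are hypothetical.
from torch.utils.cpp_extension import CUDAExtension

extra_args = ['-O2']  # hypothetical flags shared by both compilers
ext = CUDAExtension(
    name='example_ext',
    sources=['src/example.cpp', 'src/example_kernel.cu'],
    extra_compile_args={
        'cxx': extra_args,
        # before the fix, the shared flags were dropped from the nvcc list
        'nvcc': extra_args + ['-D__CUDA_NO_HALF_OPERATORS__'],
    })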
"""
Tests the Assigner objects.
CommandLine:
pytest tests/test_assigner.py
xdoctest tests/test_assigner.py zero
"""
import torch
from mmdet3d.core.bbox.assigners import MaxIoUAssigner
def test_max_iou_assigner():
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 4
assert len(assign_result.labels) == 4
expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
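# Worked check (illustrative, not part of the original test): proposal 0 vs
# gt 0 has IoU 90 / 100 = 0.9 > pos_iou_thr, hence gt_ind 1 (gt indices are
# 1-based; 0 marks a negative sample). Proposal 2 is assigned gt 2 even though
# its IoU (~0.15) is below the threshold, because each gt also claims the
# proposal that overlaps it best.
def _iou(a, b):
    """Plain axis-aligned IoU for two [x1, y1, x2, y2] boxes."""
    w = max(0, min(a[2], b[2]) - max(a[0], b[0]))
    h = max(0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = w * h
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / (area_a + area_b - inter)

assert abs(_iou([0, 0, 10, 10], [0, 0, 10, 9]) - 0.9) < 1e-6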
def test_max_iou_assigner_with_ignore():
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[30, 32, 40, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
assign_result = self.assign(
bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore)
expected_gt_inds = torch.LongTensor([1, 0, 2, -1])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
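# Illustrative check: the last proposal [30, 32, 40, 42] overlaps the ignore
# region [30, 30, 40, 40] by 10 * 8 = 80, an intersection-over-foreground of
# 80 / 100 = 0.8 > ignore_iof_thr, so its gt_ind is set to -1 (ignored).
_inter = (min(40, 40) - max(30, 30)) * (min(40, 42) - max(30, 32))
assert _inter / ((40 - 30) * (40 - 30)) == 0.8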
def test_max_iou_assigner_with_empty_gt():
"""
Test corner case where an image might have no true detections
"""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([])
assign_result = self.assign(bboxes, gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_max_iou_assigner_with_empty_boxes():
"""
Test corner case where a network might predict no boxes.
"""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
# Test with gt_labels
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 0
assert tuple(assign_result.labels.shape) == (0, )
# Test without gt_labels
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=None)
assert len(assign_result.gt_inds) == 0
assert assign_result.labels is None
def test_max_iou_assigner_with_empty_boxes_and_ignore():
"""
Test corner case where a network might predict no boxes and ignore_iof_thr is on.
"""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
gt_labels = torch.LongTensor([2, 3])
# Test with gt_labels
assign_result = self.assign(
bboxes,
gt_bboxes,
gt_labels=gt_labels,
gt_bboxes_ignore=gt_bboxes_ignore)
assert len(assign_result.gt_inds) == 0
assert tuple(assign_result.labels.shape) == (0, )
# Test without gt_labels
assign_result = self.assign(
bboxes, gt_bboxes, gt_labels=None, gt_bboxes_ignore=gt_bboxes_ignore)
assert len(assign_result.gt_inds) == 0
assert assign_result.labels is None
def test_max_iou_assigner_with_empty_boxes_and_gt():
"""
Test corner case where a network might predict no boxes and there are no gt boxes.
"""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.empty((0, 4))
assign_result = self.assign(bboxes, gt_bboxes)
assert len(assign_result.gt_inds) == 0