OpenDAS / mmdetection3d · Commits

Commit ba492be7, authored Apr 15, 2020 by zhangwenwei
Use MMDet API and pass CI
Parent: 9466dff7

Showing 8 changed files with 284 additions and 128 deletions (+284 −128)
mmdet3d/models/voxel_encoders/pillar_encoder.py    +2    −1
mmdet3d/models/voxel_encoders/utils.py             +1    −1
mmdet3d/models/voxel_encoders/voxel_encoder.py     +1    −1
mmdet3d/ops/__init__.py                            +8    −6
mmdet3d/ops/norm.py                                +110  −1
mmdet3d/ops/sync_bn.py                             +0    −110
setup.py                                           +1    −8
tests/test_assigners.py                            +161  −0
mmdet3d/models/voxel_encoders/pillar_encoder.py @ ba492be7  (+2 −1)

 import torch
 from torch import nn
-from mmdet3d.ops import DynamicScatter, build_norm_layer
+from mmdet3d.ops import DynamicScatter
+from mmdet.ops import build_norm_layer

 from ..registry import VOXEL_ENCODERS
 from .utils import PFNLayer, get_paddings_indicator
 ...
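The same one-line switch, taking build_norm_layer from mmdet.ops instead of a local copy, recurs in utils.py and voxel_encoder.py below. As a rough usage sketch (not part of the diff; the config dict and channel count are invented, and the 'BN1d' key only exists after mmdet3d/ops/norm.py below registers it with mmdet's norm_cfg), the helper maps a norm config plus a channel count to a (name, module) pair:

# Illustrative only: the call pattern of the relocated helper.
import mmdet3d.ops  # noqa: F401  (pulls in norm.py, which registers 'BN1d')
from mmdet.ops import build_norm_layer

norm_name, norm_layer = build_norm_layer(dict(type='BN1d'), 64)
# norm_name is a short string key such as 'bn'; norm_layer is the nn.Module
# (here an nn.BatchNorm1d over 64 channels) that the encoders plug in.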
mmdet3d/models/voxel_encoders/utils.py @ ba492be7  (+1 −1)

@@ -2,7 +2,7 @@ import torch
 from torch import nn
 from torch.nn import functional as F
-from ..utils import build_norm_layer
+from mmdet.ops import build_norm_layer

 class Empty(nn.Module):
 ...
mmdet3d/models/voxel_encoders/voxel_encoder.py @ ba492be7  (+1 −1)

@@ -3,9 +3,9 @@ from torch import nn
 from torch.nn import functional as F
 from mmdet3d.ops import DynamicScatter
+from mmdet.ops import build_norm_layer

 from .. import builder
 from ..registry import VOXEL_ENCODERS
-from ..utils import build_norm_layer
 from .utils import Empty, VFELayer, get_paddings_indicator
 ...
mmdet3d/ops/__init__.py @ ba492be7  (+8 −6)

-from mmdet.ops import (RoIAlign, SigmoidFocalLoss, build_norm_layer,
-                       get_compiler_version, get_compiling_cuda_version, nms,
-                       roi_align, sigmoid_focal_loss)
+from mmdet.ops import (RoIAlign, SigmoidFocalLoss, get_compiler_version,
+                       get_compiling_cuda_version, nms, roi_align,
+                       sigmoid_focal_loss)
+from .norm import NaiveSyncBatchNorm1d, NaiveSyncBatchNorm2d
 from .voxel import DynamicScatter, Voxelization, dynamic_scatter, voxelization

 __all__ = [
     'nms', 'soft_nms', 'RoIAlign', 'roi_align', 'get_compiler_version',
-    'get_compiling_cuda_version', 'build_conv_layer', 'build_norm_layer',
-    'batched_nms', 'Voxelization', 'voxelization', 'dynamic_scatter',
-    'DynamicScatter', 'sigmoid_focal_loss', 'SigmoidFocalLoss'
+    'get_compiling_cuda_version', 'build_conv_layer', 'NaiveSyncBatchNorm1d',
+    'NaiveSyncBatchNorm2d', 'batched_nms', 'Voxelization', 'voxelization',
+    'dynamic_scatter', 'DynamicScatter', 'sigmoid_focal_loss',
+    'SigmoidFocalLoss'
 ]
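With the re-exports and the new .norm module in place, downstream code can pull both the forwarded mmdet ops and the naive sync-BN layers from a single namespace. A minimal sketch, assuming mmdet3d and its compiled ops are installed (the channel count is arbitrary):

# Illustrative only: one import site for re-exported mmdet ops and the new
# NaiveSyncBatchNorm layers added by this commit.
from mmdet3d.ops import NaiveSyncBatchNorm1d, Voxelization, nms

bn = NaiveSyncBatchNorm1d(64)  # drop-in replacement for nn.BatchNorm1d(64)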
mmdet3d/ops/norm.py @ ba492be7  (+110 −1)

The single deleted line is the old re-export:

-from .sync_bn import NaiveSyncBatchNorm1d, NaiveSyncBatchNorm2d

The added content moves the implementation here from sync_bn.py (deleted below) and registers the layers with mmdet's norm_cfg:

import torch
import torch.distributed as dist
import torch.nn as nn
from torch.autograd.function import Function

from mmdet.ops.norm import norm_cfg


class AllReduce(Function):

    @staticmethod
    def forward(ctx, input):
        input_list = [
            torch.zeros_like(input) for k in range(dist.get_world_size())
        ]
        # Use allgather instead of allreduce since in-place operations
        # are unreliable
        dist.all_gather(input_list, input, async_op=False)
        inputs = torch.stack(input_list, dim=0)
        return torch.sum(inputs, dim=0)

    @staticmethod
    def backward(ctx, grad_output):
        dist.all_reduce(grad_output, async_op=False)
        return grad_output


class NaiveSyncBatchNorm1d(nn.BatchNorm1d):
    """Synchronized Batch Normalization for 3D tensors.

    Note:
        This implementation is modified from
        https://github.com/facebookresearch/detectron2/

        `torch.nn.SyncBatchNorm` has known (and possibly unknown) bugs.
        It produces significantly worse AP (and sometimes goes NaN)
        when the batch size on each worker is quite different
        (e.g., when scale augmentation is used).
        In 3D detection, different workers have points of different shapes,
        which also causes instability.

        Use this implementation before `nn.SyncBatchNorm` is fixed.
        It is slower than `nn.SyncBatchNorm`.
    """

    def forward(self, input):
        if dist.get_world_size() == 1 or not self.training:
            return super().forward(input)
        assert input.shape[0] > 0, 'SyncBN does not support empty inputs'
        C = input.shape[1]
        mean = torch.mean(input, dim=[0, 2])
        meansqr = torch.mean(input * input, dim=[0, 2])

        vec = torch.cat([mean, meansqr], dim=0)
        vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())

        mean, meansqr = torch.split(vec, C)
        var = meansqr - mean * mean
        self.running_mean += self.momentum * (
            mean.detach() - self.running_mean)
        self.running_var += self.momentum * (var.detach() - self.running_var)

        invstd = torch.rsqrt(var + self.eps)
        scale = self.weight * invstd
        bias = self.bias - mean * scale
        scale = scale.reshape(1, -1, 1)
        bias = bias.reshape(1, -1, 1)
        return input * scale + bias


class NaiveSyncBatchNorm2d(nn.BatchNorm2d):
    """Synchronized Batch Normalization for 4D tensors.

    Note:
        This implementation is modified from
        https://github.com/facebookresearch/detectron2/

        `torch.nn.SyncBatchNorm` has known (and possibly unknown) bugs.
        It produces significantly worse AP (and sometimes goes NaN)
        when the batch size on each worker is quite different
        (e.g., when scale augmentation is used).
        This phenomenon also occurs when the multi-modality feature fusion
        modules of multi-modality detectors use SyncBN.

        Use this implementation before `nn.SyncBatchNorm` is fixed.
        It is slower than `nn.SyncBatchNorm`.
    """

    def forward(self, input):
        if dist.get_world_size() == 1 or not self.training:
            return super().forward(input)
        assert input.shape[0] > 0, 'SyncBN does not support empty inputs'
        C = input.shape[1]
        mean = torch.mean(input, dim=[0, 2, 3])
        meansqr = torch.mean(input * input, dim=[0, 2, 3])

        vec = torch.cat([mean, meansqr], dim=0)
        vec = AllReduce.apply(vec) * (1.0 / dist.get_world_size())

        mean, meansqr = torch.split(vec, C)
        var = meansqr - mean * mean
        self.running_mean += self.momentum * (
            mean.detach() - self.running_mean)
        self.running_var += self.momentum * (var.detach() - self.running_var)

        invstd = torch.rsqrt(var + self.eps)
        scale = self.weight * invstd
        bias = self.bias - mean * scale
        scale = scale.reshape(1, -1, 1, 1)
        bias = bias.reshape(1, -1, 1, 1)
        return input * scale + bias


norm_cfg.update({
    'BN1d': ('bn', nn.BatchNorm1d),
    ...
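The forward passes above fold the whole normalization into one per-channel affine map, out = input * scale + bias. The following stand-alone check (plain torch, no distributed process group; tensor sizes and parameter values are arbitrary) verifies that this folding matches the textbook (x - mean) / sqrt(var + eps) * weight + bias form for a 3D input:

import torch

# Fake (N, C, L) activations and BN parameters; values are arbitrary.
x = torch.randn(4, 3, 7)
weight, bias_param, eps = torch.rand(3), torch.rand(3), 1e-5

mean = x.mean(dim=[0, 2])
var = (x * x).mean(dim=[0, 2]) - mean * mean   # E[x^2] - E[x]^2, as in forward()
invstd = torch.rsqrt(var + eps)

scale = (weight * invstd).reshape(1, -1, 1)
bias = (bias_param - mean * weight * invstd).reshape(1, -1, 1)
folded = x * scale + bias                      # what the layer returns

direct = ((x - mean.reshape(1, -1, 1)) * invstd.reshape(1, -1, 1)
          * weight.reshape(1, -1, 1) + bias_param.reshape(1, -1, 1))
assert torch.allclose(folded, direct, atol=1e-5)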
mmdet3d/ops/sync_bn.py @ 9466dff7  (deleted, 100644 → 0, +0 −110)

The deleted file contained the AllReduce, NaiveSyncBatchNorm1d and NaiveSyncBatchNorm2d implementations reproduced verbatim in mmdet3d/ops/norm.py above; the duplicate listing is omitted here.
setup.py @ ba492be7  (+1 −8)

@@ -99,7 +99,7 @@ def make_cuda_ext(name,
     if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
         define_macros += [('WITH_CUDA', None)]
         extension = CUDAExtension
-        extra_compile_args['nvcc'] = [
+        extra_compile_args['nvcc'] = extra_args + [
             '-D__CUDA_NO_HALF_OPERATORS__',
             '-D__CUDA_NO_HALF_CONVERSIONS__',
             '-D__CUDA_NO_HALF2_OPERATORS__',
 ...

@@ -248,13 +248,6 @@ if __name__ == '__main__':
                 'src/iou3d.cpp',
                 'src/iou3d_kernel.cu',
             ]),
-            make_cuda_ext(
-                name='sigmoid_focal_loss_cuda',
-                module='mmdet3d.ops.sigmoid_focal_loss',
-                sources=[
-                    'src/sigmoid_focal_loss.cpp',
-                    'src/sigmoid_focal_loss_cuda.cu'
-                ]),
             make_cuda_ext(
                 name='voxel_layer',
                 module='mmdet3d.ops.voxel',
 ...
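The first hunk only touches the nvcc arguments inside setup.py's CUDA branch; that branch itself is taken either when a GPU is visible or when the FORCE_CUDA environment variable is set to 1 (useful for building the extensions on CPU-only CI machines). A small sketch of that decision, illustrative only (the print is not part of setup.py):

import os
import torch

# Mirror the condition in setup.py: build CUDA extensions if a GPU is
# available or FORCE_CUDA=1 has been exported.
force_cuda = os.getenv('FORCE_CUDA', '0') == '1'
build_with_cuda = torch.cuda.is_available() or force_cuda
print('building CUDA extensions:', build_with_cuda)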
tests/test_assigners.py @ ba492be7  (new file, 0 → 100644, +161 −0)

"""
Tests the Assigner objects.

CommandLine:
    pytest tests/test_assigners.py
    xdoctest tests/test_assigners.py zero
"""
import torch

from mmdet3d.core.bbox.assigners import MaxIoUAssigner


def test_max_iou_assigner():
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 4
    assert len(assign_result.labels) == 4

    expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_max_iou_assigner_with_ignore():
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
        ignore_wrt_candidates=False,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [30, 32, 40, 42],
    ])
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_bboxes_ignore = torch.Tensor([
        [30, 30, 40, 40],
    ])
    assign_result = self.assign(
        bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore)

    expected_gt_inds = torch.LongTensor([1, 0, 2, -1])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_max_iou_assigner_with_empty_gt():
    """
    Test corner case where an image might have no true detections
    """
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.FloatTensor([
        [0, 0, 10, 10],
        [10, 10, 20, 20],
        [5, 5, 15, 15],
        [32, 32, 38, 42],
    ])
    gt_bboxes = torch.FloatTensor([])
    assign_result = self.assign(bboxes, gt_bboxes)

    expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
    assert torch.all(assign_result.gt_inds == expected_gt_inds)


def test_max_iou_assigner_with_empty_boxes():
    """
    Test corner case where a network might predict no boxes
    """
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_labels = torch.LongTensor([2, 3])

    # Test with gt_labels
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0, )

    # Test without gt_labels
    assign_result = self.assign(bboxes, gt_bboxes, gt_labels=None)
    assert len(assign_result.gt_inds) == 0
    assert assign_result.labels is None


def test_max_iou_assigner_with_empty_boxes_and_ignore():
    """
    Test corner case where a network might predict no boxes and
    ignore_iof_thr is on
    """
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
        ignore_iof_thr=0.5,
    )
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.FloatTensor([
        [0, 0, 10, 9],
        [0, 10, 10, 19],
    ])
    gt_bboxes_ignore = torch.Tensor([
        [30, 30, 40, 40],
    ])
    gt_labels = torch.LongTensor([2, 3])

    # Test with gt_labels
    assign_result = self.assign(
        bboxes,
        gt_bboxes,
        gt_labels=gt_labels,
        gt_bboxes_ignore=gt_bboxes_ignore)
    assert len(assign_result.gt_inds) == 0
    assert tuple(assign_result.labels.shape) == (0, )

    # Test without gt_labels
    assign_result = self.assign(
        bboxes, gt_bboxes, gt_labels=None, gt_bboxes_ignore=gt_bboxes_ignore)
    assert len(assign_result.gt_inds) == 0
    assert assign_result.labels is None


def test_max_iou_assigner_with_empty_boxes_and_gt():
    """
    Test corner case where a network might predict no boxes and no gt
    """
    self = MaxIoUAssigner(
        pos_iou_thr=0.5,
        neg_iou_thr=0.5,
    )
    bboxes = torch.empty((0, 4))
    gt_bboxes = torch.empty((0, 4))
    assign_result = self.assign(bboxes, gt_bboxes)
    assert len(assign_result.gt_inds) == 0
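These tests exercise MaxIoUAssigner through mmdet3d.core.bbox.assigners, covering normal assignment, ignored regions, and the empty-gt and empty-prediction corner cases. One way to run just this module from Python, assuming pytest is installed and the interpreter is started from the repository root (a plain `pytest tests/test_assigners.py` from a shell is equivalent):

import pytest

# Run only the new assigner tests and exit with pytest's status code.
raise SystemExit(pytest.main(['tests/test_assigners.py', '-v']))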