OpenDAS / mmdetection3d / Commits

Commit bd20e7b6, authored May 15, 2020 by liyinhao

Merge branch 'master_tmp' into indoor_dataset

Parents: 92018ce1, 535344de
Changes: 21
Showing 20 changed files with 325 additions and 122 deletions (+325, -122)
Changed files:
  .gitlab-ci.yml (+2, -1)
  configs/kitti/dv_mvx-v2_second_secfpn_fpn-fusion_adamw_2x8_80e_kitti-3d-3class.py (+2, -2)
  configs/kitti/dv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py (+2, -2)
  configs/kitti/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class.py (+2, -2)
  configs/kitti/dv_second_secfpn_6x8_80e_kitti-3d-car.py (+2, -2)
  configs/kitti/hv_PartA2_secfpn_4x8_cosine_80e_kitti-3d-3class.py (+2, -2)
  configs/kitti/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py (+2, -2)
  configs/kitti/hv_second_secfpn_6x8_80e_kitti-3d-car.py (+2, -2)
  configs/nus/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d.py (+2, -2)
  mmdet3d/datasets/pipelines/formating.py (+2, -1)
  mmdet3d/datasets/pipelines/indoor_augment.py (+1, -1)
  mmdet3d/datasets/pipelines/indoor_loading.py (+2, -2)
  mmdet3d/models/backbones/second.py (+32, -39)
  mmdet3d/models/necks/second_fpn.py (+48, -43)
  requirements/build.txt (+1, -1)
  requirements/runtime.txt (+1, -1)
  tests/test_fpn.py (+37, -0)
  tests/test_pipeline/test_indoor_augment.py (+2, -2)
  tests/test_pipeline/test_indoor_loading.py (+16, -15)
  tests/test_pipeline/test_indoor_pipeline.py (+165, -0)
.gitlab-ci.yml (+2, -1)

@@ -16,7 +16,7 @@ before_script:
 .linting_template: &linting_template_def
   stage: linting
   script:
-    - pip install flake8 yapf isort
+    - pip install flake8==3.7.9 yapf isort
     - flake8 .
     - isort -rc --check-only --diff mmdet3d/ tools/ tests/
     - yapf -r -d mmdet3d/ tools/ tests/ configs/

@@ -26,6 +26,7 @@ before_script:
   script:
     - echo "Start building..."
     - pip install "git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI"
+    - pip install git+https://github.com/open-mmlab/mmcv.git
    - pip install git+https://github.com/open-mmlab/mmdetection.git
     - python -c "import mmdet; print(mmdet.__version__)"
     - pip install -v -e .[all]
configs/kitti/dv_mvx-v2_second_secfpn_fpn-fusion_adamw_2x8_80e_kitti-3d-3class.py (+2, -2)

@@ -57,13 +57,13 @@ model = dict(
         in_channels=256,
         layer_nums=[5, 5],
         layer_strides=[1, 2],
-        num_filters=[128, 256],
+        out_channels=[128, 256],
     ),
     pts_neck=dict(
         type='SECONDFPN',
         in_channels=[128, 256],
         upsample_strides=[1, 2],
-        num_upsample_filters=[256, 256],
+        out_channels=[256, 256],
     ),
     pts_bbox_head=dict(
         type='SECONDHead',
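The same rename repeats in every config touched by this merge: the backbone key num_filters and the SECONDFPN key num_upsample_filters both become out_channels. As a quick reference, a minimal sketch of the updated pattern (values mirror the hunk above; the backbone type name is assumed here, since the hunk starts below it):

    pts_backbone=dict(
        type='SECOND',                 # assumed; not shown in the hunk above
        in_channels=256,
        layer_nums=[5, 5],
        layer_strides=[1, 2],
        out_channels=[128, 256]),      # previously num_filters=[128, 256]
    pts_neck=dict(
        type='SECONDFPN',
        in_channels=[128, 256],
        upsample_strides=[1, 2],
        out_channels=[256, 256]),      # previously num_upsample_filters=[256, 256]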
configs/kitti/dv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py (+2, -2)

@@ -28,13 +28,13 @@ model = dict(
         in_channels=64,
         layer_nums=[3, 5, 5],
         layer_strides=[2, 2, 2],
-        num_filters=[64, 128, 256],
+        out_channels=[64, 128, 256],
     ),
     neck=dict(
         type='SECONDFPN',
         in_channels=[64, 128, 256],
         upsample_strides=[1, 2, 4],
-        num_upsample_filters=[128, 128, 128],
+        out_channels=[128, 128, 128],
     ),
     bbox_head=dict(
         type='SECONDHead',
configs/kitti/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class.py (+2, -2)

@@ -26,13 +26,13 @@ model = dict(
         in_channels=256,
         layer_nums=[5, 5],
         layer_strides=[1, 2],
-        num_filters=[128, 256],
+        out_channels=[128, 256],
     ),
     neck=dict(
         type='SECONDFPN',
         in_channels=[128, 256],
         upsample_strides=[1, 2],
-        num_upsample_filters=[256, 256],
+        out_channels=[256, 256],
     ),
     bbox_head=dict(
         type='SECONDHead',
configs/kitti/dv_second_secfpn_6x8_80e_kitti-3d-car.py (+2, -2)

@@ -26,13 +26,13 @@ model = dict(
         in_channels=256,
         layer_nums=[5, 5],
         layer_strides=[1, 2],
-        num_filters=[128, 256],
+        out_channels=[128, 256],
     ),
     neck=dict(
         type='SECONDFPN',
         in_channels=[128, 256],
         upsample_strides=[1, 2],
-        num_upsample_filters=[256, 256],
+        out_channels=[256, 256],
     ),
     bbox_head=dict(
         type='SECONDHead',
configs/kitti/hv_PartA2_secfpn_4x8_cosine_80e_kitti-3d-3class.py (+2, -2)

@@ -22,12 +22,12 @@ model = dict(
         in_channels=256,
         layer_nums=[5, 5],
         layer_strides=[1, 2],
-        num_filters=[128, 256]),
+        out_channels=[128, 256]),
     neck=dict(
         type='SECONDFPN',
         in_channels=[128, 256],
         upsample_strides=[1, 2],
-        num_upsample_filters=[256, 256]),
+        out_channels=[256, 256]),
     rpn_head=dict(
         type='PartA2RPNHead',
         class_name=['Pedestrian', 'Cyclist', 'Car'],
configs/kitti/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py (+2, -2)

@@ -27,13 +27,13 @@ model = dict(
         in_channels=64,
         layer_nums=[3, 5, 5],
         layer_strides=[2, 2, 2],
-        num_filters=[64, 128, 256],
+        out_channels=[64, 128, 256],
     ),
     neck=dict(
         type='SECONDFPN',
         in_channels=[64, 128, 256],
         upsample_strides=[1, 2, 4],
-        num_upsample_filters=[128, 128, 128],
+        out_channels=[128, 128, 128],
     ),
     bbox_head=dict(
         type='SECONDHead',
configs/kitti/hv_second_secfpn_6x8_80e_kitti-3d-car.py (+2, -2)

@@ -26,13 +26,13 @@ model = dict(
         in_channels=256,
         layer_nums=[5, 5],
         layer_strides=[1, 2],
-        num_filters=[128, 256],
+        out_channels=[128, 256],
     ),
     neck=dict(
         type='SECONDFPN',
         in_channels=[128, 256],
         upsample_strides=[1, 2],
-        num_upsample_filters=[256, 256],
+        out_channels=[256, 256],
     ),
     bbox_head=dict(
         type='SECONDHead',
configs/nus/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d.py (+2, -2)

@@ -34,14 +34,14 @@ model = dict(
         norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
         layer_nums=[3, 5, 5],
         layer_strides=[2, 2, 2],
-        num_filters=[64, 128, 256],
+        out_channels=[64, 128, 256],
     ),
     pts_neck=dict(
         type='SECONDFPN',
         norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
         in_channels=[64, 128, 256],
         upsample_strides=[1, 2, 4],
-        num_upsample_filters=[128, 128, 128],
+        out_channels=[128, 128, 128],
     ),
     pts_bbox_head=dict(
         type='Anchor3DVeloHead',
mmdet3d/datasets/pipelines/formating.py (+2, -1)

@@ -40,7 +40,8 @@ class DefaultFormatBundle(object):
             results['img'] = DC(to_tensor(img), stack=True)
         for key in [
                 'proposals', 'gt_bboxes', 'gt_bboxes_3d', 'gt_bboxes_ignore',
-                'gt_labels', 'gt_labels_3d'
+                'gt_labels', 'gt_labels_3d', 'pts_instance_mask',
+                'pts_semantic_mask'
         ]:
             if key not in results:
                 continue
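The practical effect is that the indoor point-wise masks are now collected into DataContainers by the default bundle, just like the box and label keys. A minimal sketch of that wrapping (assuming mmcv's DataContainer and the to_tensor helper this module imports from mmdet; the mask values are taken from the new pipeline test further below):

    import numpy as np
    from mmcv.parallel import DataContainer as DC
    from mmdet.datasets.pipelines.formating import to_tensor

    results = dict(
        pts_instance_mask=np.array([44, 22, 10, 10, 57]),
        pts_semantic_mask=np.array([3, 1, 2, 2, 15]))
    for key in ['pts_instance_mask', 'pts_semantic_mask']:
        if key not in results:
            continue
        # same treatment the bundle applies to the other annotation keys
        results[key] = DC(to_tensor(results[key]))
    print(type(results['pts_instance_mask']))  # DataContainer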
mmdet3d/datasets/pipelines/indoor_augment.py (+1, -1)

@@ -133,7 +133,7 @@ class IndoorGlobalRotScale(object):
     def __init__(self, use_height=True, rot_range=None, scale_range=None):
         self.use_height = use_height
-        self.rot_range = rot_range
+        self.rot_range = np.pi * np.array(rot_range)
         self.scale_range = scale_range
 
     def _rotz(self, t):
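Note the unit change: rot_range is now given as a fraction of pi and converted to radians inside the transform, which is why the augmentation and pipeline tests below pass values like [-1 / 6, 1 / 6] instead of [-np.pi / 6, np.pi / 6]. A quick numeric check:

    import numpy as np

    rot_range = [-1 / 6, 1 / 6]         # new-style value, in units of pi
    print(np.pi * np.array(rot_range))  # approx. [-0.5236  0.5236] rad, i.e. +/-30 degrees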
mmdet3d/datasets/pipelines/indoor_loading.py (+2, -2)

@@ -92,8 +92,8 @@ class IndoorLoadAnnotations3D(object):
         mmcv.check_file_exist(pts_instance_mask_path)
         mmcv.check_file_exist(pts_semantic_mask_path)
-        pts_instance_mask = np.load(pts_instance_mask_path)
-        pts_semantic_mask = np.load(pts_semantic_mask_path)
+        pts_instance_mask = np.load(pts_instance_mask_path).astype(np.int)
+        pts_semantic_mask = np.load(pts_semantic_mask_path).astype(np.int)
         results['pts_instance_mask'] = pts_instance_mask
         results['pts_semantic_mask'] = pts_semantic_mask
mmdet3d/models/backbones/second.py (+32, -39)

-from functools import partial
-
 import torch.nn as nn
-from mmcv.cnn import build_norm_layer
+from mmcv.cnn import build_conv_layer, build_norm_layer
 from mmcv.runner import load_checkpoint
 
 from mmdet.models import BACKBONES
 
 
-class Empty(nn.Module):
-
-    def __init__(self, *args, **kwargs):
-        super(Empty, self).__init__()
-
-    def forward(self, *args, **kwargs):
-        if len(args) == 1:
-            return args[0]
-        elif len(args) == 0:
-            return None
-        return args
-
-
 @BACKBONES.register_module()
 class SECOND(nn.Module):
-    """Compare with RPN, RPNV2 support arbitrary number of stage.
+    """Backbone network for SECOND/PointPillars/PartA2/MVXNet
+
+    Args:
+        in_channels (int): Input channels
+        out_channels (list[int]): Output channels for multi-scale feature maps
+        layer_nums (list[int]): Number of layers in each stage
+        layer_strides (list[int]): Strides of each stage
+        norm_cfg (dict): Config dict of normalization layers
+        conv_cfg (dict): Config dict of convolutional layers
     """
 
     def __init__(self,
                  in_channels=128,
+                 out_channels=[128, 128, 256],
                  layer_nums=[3, 5, 5],
                  layer_strides=[2, 2, 2],
-                 num_filters=[128, 128, 256],
-                 norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01)):
+                 norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
+                 conv_cfg=dict(type='Conv2d', bias=False)):
         super(SECOND, self).__init__()
         assert len(layer_strides) == len(layer_nums)
-        assert len(num_filters) == len(layer_nums)
-        if norm_cfg is not None:
-            Conv2d = partial(nn.Conv2d, bias=False)
-        else:
-            Conv2d = partial(nn.Conv2d, bias=True)
-
-        in_filters = [in_channels, *num_filters[:-1]]
+        assert len(out_channels) == len(layer_nums)
+
+        in_filters = [in_channels, *out_channels[:-1]]
         # note that when stride > 1, conv2d with same padding isn't
         # equal to pad-conv2d. we should use pad-conv2d.
         blocks = []
         for i, layer_num in enumerate(layer_nums):
-            norm_layer = (
-                build_norm_layer(norm_cfg, num_filters[i])[1]
-                if norm_cfg is not None else Empty)
             block = [
-                nn.ZeroPad2d(1),
-                Conv2d(in_filters[i], num_filters[i], 3,
-                       stride=layer_strides[i]),
-                norm_layer,
+                build_conv_layer(
+                    conv_cfg,
+                    in_filters[i],
+                    out_channels[i],
+                    3,
+                    stride=layer_strides[i],
+                    padding=1),
+                build_norm_layer(norm_cfg, out_channels[i])[1],
                 nn.ReLU(inplace=True),
             ]
             for j in range(layer_num):
-                norm_layer = (
-                    build_norm_layer(norm_cfg, num_filters[i])[1]
-                    if norm_cfg is not None else Empty)
-                block.append(
-                    Conv2d(num_filters[i], num_filters[i], 3, padding=1))
-                block.append(norm_layer)
+                block.append(
+                    build_conv_layer(
+                        conv_cfg,
+                        out_channels[i],
+                        out_channels[i],
+                        3,
+                        padding=1))
+                block.append(build_norm_layer(norm_cfg, out_channels[i])[1])
                 block.append(nn.ReLU(inplace=True))
 
             block = nn.Sequential(*block)

@@ -71,6 +62,8 @@ class SECOND(nn.Module):
         self.blocks = nn.ModuleList(blocks)
 
     def init_weights(self, pretrained=None):
+        # Do not initialize the conv layers
+        # to follow the original implementation
         if isinstance(pretrained, str):
             from mmdet3d.utils import get_root_logger
             logger = get_root_logger()
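In short, the backbone now delegates layer construction to mmcv's config-driven build_conv_layer / build_norm_layer instead of the local partial / Empty helpers, and num_filters becomes out_channels. A minimal sketch of constructing the refactored backbone directly (assumes mmdet3d at this commit; the input tensor size is arbitrary and forward is assumed to return one feature map per stage):

    import torch
    from mmdet3d.models.backbones.second import SECOND

    # arguments mirror the new __init__ signature shown above
    backbone = SECOND(
        in_channels=128,
        out_channels=[128, 128, 256],   # previously num_filters
        layer_nums=[3, 5, 5],
        layer_strides=[2, 2, 2],
        norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
        conv_cfg=dict(type='Conv2d', bias=False))
    feats = backbone(torch.rand(1, 128, 64, 64))
    print([f.shape for f in feats])     # multi-scale feature maps, one per stage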
mmdet3d/models/necks/second_fpn.py (+48, -43)

-from functools import partial
-
 import torch
 import torch.nn as nn
-from mmcv.cnn import build_norm_layer, constant_init, kaiming_init
-from torch.nn import Sequential
-from torch.nn.modules.batchnorm import _BatchNorm
+from mmcv.cnn import (build_norm_layer, build_upsample_layer, constant_init,
+                      is_norm, kaiming_init)
 
 from mmdet.models import NECKS
 from .. import builder

@@ -12,36 +9,40 @@ from .. import builder
 @NECKS.register_module()
 class SECONDFPN(nn.Module):
-    """Compare with RPN, RPNV2 support arbitrary number of stage.
+    """FPN used in SECOND/PointPillars/PartA2/MVXNet
+
+    Args:
+        in_channels (list[int]): Input channels of multi-scale feature maps
+        out_channels (list[int]): Output channels of feature maps
+        upsample_strides (list[int]): Strides used to upsample the feature maps
+        norm_cfg (dict): Config dict of normalization layers
+        upsample_cfg (dict): Config dict of upsample layers
     """
 
     def __init__(self,
-                 use_norm=True,
                  in_channels=[128, 128, 256],
+                 out_channels=[256, 256, 256],
                  upsample_strides=[1, 2, 4],
-                 num_upsample_filters=[256, 256, 256],
-                 norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01)):
+                 norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
+                 upsample_cfg=dict(type='deconv', bias=False)):
+        # if for GroupNorm,
+        # cfg is dict(type='GN', num_groups=num_groups, eps=1e-3, affine=True)
         super(SECONDFPN, self).__init__()
-        assert len(num_upsample_filters) == len(upsample_strides)
+        assert len(out_channels) == len(upsample_strides) == len(in_channels)
         self.in_channels = in_channels
-        ConvTranspose2d = partial(nn.ConvTranspose2d, bias=False)
+        self.out_channels = out_channels
         deblocks = []
-        for i, num_upsample_filter in enumerate(num_upsample_filters):
-            norm_layer = build_norm_layer(norm_cfg, num_upsample_filter)[1]
-            deblock = Sequential(
-                ConvTranspose2d(
-                    in_channels[i],
-                    num_upsample_filter,
-                    upsample_strides[i],
-                    stride=upsample_strides[i]),
-                norm_layer,
-                nn.ReLU(inplace=True),
-            )
+        for i, out_channel in enumerate(out_channels):
+            upsample_layer = build_upsample_layer(
+                upsample_cfg,
+                in_channels=in_channels[i],
+                out_channels=out_channel,
+                kernel_size=upsample_strides[i],
+                stride=upsample_strides[i])
+            deblock = nn.Sequential(upsample_layer,
+                                    build_norm_layer(norm_cfg, out_channel)[1],
+                                    nn.ReLU(inplace=True))
             deblocks.append(deblock)
         self.deblocks = nn.ModuleList(deblocks)

@@ -49,7 +50,7 @@ class SECONDFPN(nn.Module):
         for m in self.modules():
             if isinstance(m, nn.Conv2d):
                 kaiming_init(m)
-            elif isinstance(m, (_BatchNorm, nn.GroupNorm)):
+            elif is_norm(m):
                 constant_init(m, 1)
 
     def forward(self, x):

@@ -65,30 +66,34 @@ class SECONDFPN(nn.Module):
 @NECKS.register_module()
 class SECONDFusionFPN(SECONDFPN):
-    """Compare with RPN, RPNV2 support arbitrary number of stage.
+    """FPN used in multi-modality SECOND/PointPillars
+
+    Args:
+        in_channels (list[int]): Input channels of multi-scale feature maps
+        out_channels (list[int]): Output channels of feature maps
+        upsample_strides (list[int]): Strides used to upsample the feature maps
+        norm_cfg (dict): Config dict of normalization layers
+        upsample_cfg (dict): Config dict of upsample layers
+        downsample_rates (list[int]): The downsample rate of feature map in
+            comparison to the original voxelization input
+        fusion_layer (dict): Config dict of fusion layers
+    """
 
     def __init__(self,
-                 use_norm=True,
                  in_channels=[128, 128, 256],
+                 out_channels=[256, 256, 256],
                  upsample_strides=[1, 2, 4],
-                 num_upsample_filters=[256, 256, 256],
                  norm_cfg=dict(type='BN', eps=1e-3, momentum=0.01),
-                 down_sample_rate=[40, 8, 8],
-                 fusion_layer=None,
-                 cat_points=False):
-        super(SECONDFusionFPN, self).__init__(
-            use_norm,
-            in_channels,
-            upsample_strides,
-            num_upsample_filters,
-            norm_cfg,
-        )
+                 upsample_cfg=dict(type='deconv', bias=False),
+                 downsample_rates=[40, 8, 8],
+                 fusion_layer=None):
+        super(SECONDFusionFPN, self).__init__(in_channels, out_channels,
+                                              upsample_strides, norm_cfg,
+                                              upsample_cfg)
         self.fusion_layer = None
         if fusion_layer is not None:
             self.fusion_layer = builder.build_fusion_layer(fusion_layer)
-        self.cat_points = cat_points
-        self.down_sample_rate = down_sample_rate
+        self.downsample_rates = downsample_rates
 
     def forward(self,
                 x,

@@ -107,11 +112,11 @@ class SECONDFusionFPN(SECONDFPN):
         downsample_pts_coors = torch.zeros_like(coors)
         downsample_pts_coors[:, 0] = coors[:, 0]
         downsample_pts_coors[:, 1] = (
-            coors[:, 1] / self.down_sample_rate[0])
+            coors[:, 1] / self.downsample_rates[0])
         downsample_pts_coors[:, 2] = (
-            coors[:, 2] / self.down_sample_rate[1])
+            coors[:, 2] / self.downsample_rates[1])
         downsample_pts_coors[:, 3] = (
-            coors[:, 3] / self.down_sample_rate[2])
+            coors[:, 3] / self.downsample_rates[2])
         # fusion for each point
         out = self.fusion_layer(img_feats, points, out, downsample_pts_coors,
                                 img_meta)
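The neck now builds each upsampling block through mmcv's build_upsample_layer driven by upsample_cfg, so every deblock is Sequential(upsample layer, norm layer, ReLU) and a GroupNorm variant can be selected purely through norm_cfg. A minimal sketch mirroring the new unit test below (toy channel numbers; assumes mmdet3d at this commit, which registers SECONDFPN in mmdet's NECKS registry):

    from mmdet.models.builder import build_neck

    neck_cfg = dict(
        type='SECONDFPN',
        in_channels=[2, 3],
        upsample_strides=[1, 2],
        out_channels=[4, 6])
    neck = build_neck(neck_cfg)
    # deblocks[i][0] is the layer built from upsample_cfg
    # (a ConvTranspose2d for the default type='deconv')
    print(neck.deblocks[0][0].in_channels, neck.deblocks[0][0].out_channels)  # 2 4
    print(neck.deblocks[1][0].stride)                                         # (2, 2)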
requirements/build.txt (+1, -1)

 # These must be installed before building mmdetection
 numpy
-torch>=1.1
+torch>=1.3
requirements/runtime.txt (+1, -1)

 matplotlib
-mmcv>=0.5.0
+mmcv>=0.5.1
 numba==0.45.1
 numpy
 # need older pillow until torchvision is fixed
 ...
tests/test_fpn.py (new file, +37, -0)

import pytest


def test_secfpn():
    neck_cfg = dict(
        type='SECONDFPN',
        in_channels=[2, 3],
        upsample_strides=[1, 2],
        out_channels=[4, 6],
    )
    from mmdet.models.builder import build_neck
    neck = build_neck(neck_cfg)
    assert neck.deblocks[0][0].in_channels == 2
    assert neck.deblocks[1][0].in_channels == 3
    assert neck.deblocks[0][0].out_channels == 4
    assert neck.deblocks[1][0].out_channels == 6
    assert neck.deblocks[0][0].stride == (1, 1)
    assert neck.deblocks[1][0].stride == (2, 2)
    assert neck is not None

    neck_cfg = dict(
        type='SECONDFPN',
        in_channels=[2, 2],
        upsample_strides=[1, 2, 4],
        out_channels=[2, 2],
    )
    with pytest.raises(AssertionError):
        build_neck(neck_cfg)

    neck_cfg = dict(
        type='SECONDFPN',
        in_channels=[2, 2, 4],
        upsample_strides=[1, 2, 4],
        out_channels=[2, 2],
    )
    with pytest.raises(AssertionError):
        build_neck(neck_cfg)
tests/test_indoor_augment.py → tests/test_pipeline/test_indoor_augment.py (+2, -2)

@@ -64,7 +64,7 @@ def test_indoor_flip_data():
 def test_global_rot_scale():
     np.random.seed(0)
     sunrgbd_augment = IndoorGlobalRotScale(
-        True, rot_range=[-np.pi / 6, np.pi / 6], scale_range=[0.85, 1.15])
+        True, rot_range=[-1 / 6, 1 / 6], scale_range=[0.85, 1.15])
     sunrgbd_results = dict()
     sunrgbd_results['points'] = np.array(
         [[1.02828765e+00, 3.65790772e+00, 1.97294697e-01, 1.61959505e+00],

@@ -101,7 +101,7 @@ def test_global_rot_scale():
     np.random.seed(0)
     scannet_augment = IndoorGlobalRotScale(
-        True, rot_range=[-np.pi * 1 / 36, np.pi * 1 / 36], scale_range=None)
+        True, rot_range=[-1 * 1 / 36, 1 / 36], scale_range=None)
     scannet_results = dict()
     scannet_results['points'] = np.array(
         [[1.6110241e+00, -1.6903955e-01, 5.8115810e-01, 5.9897250e-01],
tests/test_indoor_loading.py → tests/test_pipeline/test_indoor_loading.py (+16, -15)

@@ -39,15 +39,16 @@ def test_load_annotations3D():
     sunrgbd_info = mmcv.load('./tests/data/sunrgbd/sunrgbd_infos.pkl')[0]
     if sunrgbd_info['annos']['gt_num'] != 0:
         sunrgbd_gt_bboxes_3d = sunrgbd_info['annos']['gt_boxes_upright_depth']
-        sunrgbd_gt_labels = sunrgbd_info['annos']['class'].reshape(-1, 1)
-        sunrgbd_gt_bboxes_3d_mask = np.ones_like(sunrgbd_gt_labels)
+        sunrgbd_gt_labels_3d = sunrgbd_info['annos']['class']
+        sunrgbd_gt_bboxes_3d_mask = np.ones_like(
+            sunrgbd_gt_labels_3d, dtype=np.bool)
     else:
         sunrgbd_gt_bboxes_3d = np.zeros((1, 6), dtype=np.float32)
-        sunrgbd_gt_labels = np.zeros((1, 1))
-        sunrgbd_gt_bboxes_3d_mask = np.zeros((1, 1))
+        sunrgbd_gt_labels_3d = np.zeros((1, ))
+        sunrgbd_gt_bboxes_3d_mask = np.zeros((1, ), dtype=np.bool)
     assert sunrgbd_gt_bboxes_3d.shape == (3, 7)
-    assert sunrgbd_gt_labels.shape == (3, 1)
-    assert sunrgbd_gt_bboxes_3d_mask.shape == (3, 1)
+    assert sunrgbd_gt_labels_3d.shape == (3, )
+    assert sunrgbd_gt_bboxes_3d_mask.shape == (3, )
 
     scannet_info = mmcv.load('./tests/data/scannet/scannet_infos.pkl')[0]
     scannet_load_annotations3D = IndoorLoadAnnotations3D()

@@ -55,29 +56,29 @@ def test_load_annotations3D():
     data_path = './tests/data/scannet/scannet_train_instance_data'
     if scannet_info['annos']['gt_num'] != 0:
         scannet_gt_bboxes_3d = scannet_info['annos']['gt_boxes_upright_depth']
-        scannet_gt_labels = scannet_info['annos']['class'].reshape(-1, 1)
-        scannet_gt_bboxes_3d_mask = np.ones_like(scannet_gt_labels)
+        scannet_gt_labels_3d = scannet_info['annos']['class']
+        scannet_gt_bboxes_3d_mask = np.ones_like(
+            scannet_gt_labels_3d, dtype=np.bool)
     else:
         scannet_gt_bboxes_3d = np.zeros((1, 6), dtype=np.float32)
-        scannet_gt_labels = np.zeros((1, 1))
-        scannet_gt_bboxes_3d_mask = np.zeros((1, 1))
+        scannet_gt_labels_3d = np.zeros((1, ))
+        scannet_gt_bboxes_3d_mask = np.zeros((1, ), dtype=np.bool)
     scan_name = scannet_info['point_cloud']['lidar_idx']
     scannet_results['pts_instance_mask_path'] = osp.join(
         data_path, f'{scan_name}_ins_label.npy')
     scannet_results['pts_semantic_mask_path'] = osp.join(
         data_path, f'{scan_name}_sem_label.npy')
     scannet_results['info'] = scannet_info
     scannet_results['gt_bboxes_3d'] = scannet_gt_bboxes_3d
-    scannet_results['gt_labels'] = scannet_gt_labels
+    scannet_results['gt_labels_3d'] = scannet_gt_labels_3d
     scannet_results['gt_bboxes_3d_mask'] = scannet_gt_bboxes_3d_mask
     scannet_results = scannet_load_annotations3D(scannet_results)
     scannet_gt_boxes = scannet_results['gt_bboxes_3d']
-    scannet_gt_lbaels = scannet_results['gt_labels']
+    scannet_gt_lbaels = scannet_results['gt_labels_3d']
     scannet_gt_boxes_mask = scannet_results['gt_bboxes_3d_mask']
     scannet_pts_instance_mask = scannet_results['pts_instance_mask']
     scannet_pts_semantic_mask = scannet_results['pts_semantic_mask']
     assert scannet_gt_boxes.shape == (27, 6)
-    assert scannet_gt_lbaels.shape == (27, 1)
-    assert scannet_gt_boxes_mask.shape == (27, 1)
+    assert scannet_gt_lbaels.shape == (27, )
+    assert scannet_gt_boxes_mask.shape == (27, )
     assert scannet_pts_instance_mask.shape == (100, )
     assert scannet_pts_semantic_mask.shape == (100, )
tests/test_pipeline/test_indoor_pipeline.py (new file, +165, -0)

import os.path as osp

import mmcv
import numpy as np

from mmdet3d.datasets.pipelines import Compose


def test_scannet_pipeline():
    class_names = ('cabinet', 'bed', 'chair', 'sofa', 'table', 'door',
                   'window', 'bookshelf', 'picture', 'counter', 'desk',
                   'curtain', 'refrigerator', 'showercurtrain', 'toilet',
                   'sink', 'bathtub', 'garbagebin')
    np.random.seed(0)
    pipelines = [
        dict(
            type='IndoorLoadPointsFromFile',
            use_height=True,
            load_dim=6,
            use_dim=[0, 1, 2]),
        dict(type='IndoorLoadAnnotations3D'),
        dict(type='IndoorPointSample', num_points=5),
        dict(type='IndoorFlipData', flip_ratio_yz=1.0, flip_ratio_xz=1.0),
        dict(
            type='IndoorGlobalRotScale',
            use_height=True,
            rot_range=[-1 / 36, 1 / 36],
            scale_range=None),
        dict(type='DefaultFormatBundle3D', class_names=class_names),
        dict(
            type='Collect3D',
            keys=[
                'points', 'gt_bboxes_3d', 'gt_labels_3d', 'pts_semantic_mask',
                'pts_instance_mask'
            ]),
    ]
    pipeline = Compose(pipelines)
    info = mmcv.load('./tests/data/scannet/scannet_infos.pkl')[0]
    results = dict()
    data_path = './tests/data/scannet/scannet_train_instance_data'
    results['data_path'] = data_path
    scan_name = info['point_cloud']['lidar_idx']
    results['pts_filename'] = osp.join(data_path, f'{scan_name}_vert.npy')
    if info['annos']['gt_num'] != 0:
        scannet_gt_bboxes_3d = info['annos']['gt_boxes_upright_depth']
        scannet_gt_labels_3d = info['annos']['class']
        scannet_gt_bboxes_3d_mask = np.ones_like(
            scannet_gt_labels_3d, dtype=np.bool)
    else:
        scannet_gt_bboxes_3d = np.zeros((1, 6), dtype=np.float32)
        scannet_gt_labels_3d = np.zeros((1, ))
        scannet_gt_bboxes_3d_mask = np.zeros((1, ), dtype=np.bool)
    scan_name = info['point_cloud']['lidar_idx']
    results['pts_instance_mask_path'] = osp.join(data_path,
                                                 f'{scan_name}_ins_label.npy')
    results['pts_semantic_mask_path'] = osp.join(data_path,
                                                 f'{scan_name}_sem_label.npy')
    results['gt_bboxes_3d'] = scannet_gt_bboxes_3d
    results['gt_labels_3d'] = scannet_gt_labels_3d
    results['gt_bboxes_3d_mask'] = scannet_gt_bboxes_3d_mask
    results = pipeline(results)
    points = results['points']._data
    gt_bboxes_3d = results['gt_bboxes_3d']._data
    gt_labels_3d = results['gt_labels_3d']._data
    pts_semantic_mask = results['pts_semantic_mask']._data
    pts_instance_mask = results['pts_instance_mask']._data
    expected_points = np.array(
        [[-2.9078157, -1.9569951, 2.3543026, 2.389488],
         [-0.71360034, -3.4359822, 2.1330001, 2.1681855],
         [-1.332374, 1.474838, -0.04405887, -0.00887359],
         [2.1336637, -1.3265059, -0.02880373, 0.00638155],
         [0.43895668, -3.0259454, 1.5560012, 1.5911865]])
    expected_gt_bboxes_3d = np.array([
        [-1.5005362, -3.512584, 1.8565295, 1.7457027, 0.24149807, 0.57235193],
        [-2.8848705, 3.4961755, 1.5268247, 0.66170084, 0.17433672, 0.67153597],
        [-1.1585636, -2.192365, 0.61649567, 0.5557011, 2.5375574, 1.2144762],
        [-2.930457, -2.4856408, 0.9722377, 0.6270478, 1.8461524, 0.28697443],
        [3.3114715, -0.00476722, 1.0712197, 0.46191898, 3.8605113, 2.1603441]
    ])
    expected_gt_labels_3d = np.array([
        6, 6, 4, 9, 11, 11, 10, 0, 15, 17, 17, 17, 3, 12, 4, 4, 14, 1, 0, 0,
        0, 0, 0, 0, 5, 5, 5
    ])
    expected_pts_semantic_mask = np.array([3, 1, 2, 2, 15])
    expected_pts_instance_mask = np.array([44, 22, 10, 10, 57])
    assert np.allclose(points, expected_points)
    assert np.allclose(gt_bboxes_3d[:5, :], expected_gt_bboxes_3d)
    assert np.all(gt_labels_3d.numpy() == expected_gt_labels_3d)
    assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask)
    assert np.all(pts_instance_mask.numpy() == expected_pts_instance_mask)


def test_sunrgbd_pipeline():
    class_names = ('bed', 'table', 'sofa', 'chair', 'toilet', 'desk',
                   'dresser', 'night_stand', 'bookshelf', 'bathtub')
    np.random.seed(0)
    pipelines = [
        dict(
            type='IndoorLoadPointsFromFile',
            use_height=True,
            load_dim=6,
            use_dim=[0, 1, 2]),
        dict(type='IndoorFlipData', flip_ratio_yz=1.0),
        dict(
            type='IndoorGlobalRotScale',
            use_height=True,
            rot_range=[-1 / 6, 1 / 6],
            scale_range=[0.85, 1.15]),
        dict(type='IndoorPointSample', num_points=5),
        dict(type='DefaultFormatBundle3D', class_names=class_names),
        dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']),
    ]
    pipeline = Compose(pipelines)
    results = dict()
    info = mmcv.load('./tests/data/sunrgbd/sunrgbd_infos.pkl')[0]
    data_path = './tests/data/sunrgbd/sunrgbd_trainval'
    scan_name = info['point_cloud']['lidar_idx']
    results['pts_filename'] = osp.join(data_path, 'lidar',
                                       f'{scan_name:06d}.npy')
    if info['annos']['gt_num'] != 0:
        gt_bboxes_3d = info['annos']['gt_boxes_upright_depth']
        gt_labels_3d = info['annos']['class']
        gt_bboxes_3d_mask = np.ones_like(gt_labels_3d, dtype=np.bool)
    else:
        gt_bboxes_3d = np.zeros((1, 6), dtype=np.float32)
        gt_labels_3d = np.zeros((1, ))
        gt_bboxes_3d_mask = np.zeros((1, ), dtype=np.bool)
    results['gt_bboxes_3d'] = gt_bboxes_3d
    results['gt_labels_3d'] = gt_labels_3d
    results['gt_bboxes_3d_mask'] = gt_bboxes_3d_mask
    results = pipeline(results)
    points = results['points']._data
    gt_bboxes_3d = results['gt_bboxes_3d']._data
    gt_labels_3d = results['gt_labels_3d']._data
    expected_points = np.array(
        [[0.6570105, 1.5538014, 0.24514851, 1.0165423],
         [0.656101, 1.558591, 0.21755838, 0.98895216],
         [0.6293659, 1.5679953, -0.10004003, 0.67135376],
         [0.6068739, 1.5974995, -0.41063973, 0.36075398],
         [0.6464709, 1.5573514, 0.15114647, 0.9225402]])
    expected_gt_bboxes_3d = np.array(
        [[-2.012483, 3.9473376, -0.25446942, 2.3730404, 1.9457763, 2.0303352,
          1.2205974],
         [-3.7036808, 4.2396426, -0.81091917, 0.6032123, 0.91040343, 1.003341,
          1.2662518],
         [0.6528646, 2.1638472, -0.15228128, 0.7347852, 1.6113238, 2.1694272,
          2.81404]])
    expected_gt_labels_3d = np.array([0, 7, 6])
    assert np.allclose(gt_bboxes_3d, expected_gt_bboxes_3d)
    assert np.allclose(gt_labels_3d.flatten(), expected_gt_labels_3d)
    assert np.allclose(points, expected_points)