OpenDAS / mmdetection3d

Commit f95de58b authored May 11, 2020 by zhangwenwei
Parent: 3337fa69

Fix configs

Showing 10 changed files with 231 additions and 20 deletions (+231 -20)
configs/kitti/dv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py      +1    -1
configs/kitti/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class.py   +1    -1
configs/kitti/dv_second_secfpn_6x8_80e_kitti-3d-car.py             +1    -1
configs/kitti/hv_PartA2_secfpn_4x8_cosine_80e_kitti-3d-3class.py   +1    -1
configs/kitti/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py      +1    -1
configs/kitti/hv_second_secfpn_6x8_80e_kitti-3d-car.py             +1    -1
configs/nus/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d.py           +217  -0
mmdet3d/models/necks/second_fpn.py                                 +6    -12
requirements/build.txt                                             +1    -1
requirements/runtime.txt                                           +1    -1
configs/kitti/dv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py

@@ -34,7 +34,7 @@ model = dict(
         type='SECONDFPN',
         in_channels=[64, 128, 256],
         upsample_strides=[1, 2, 4],
-        num_upsample_filters=[128, 128, 128],
+        out_channels=[128, 128, 128],
     ),
     bbox_head=dict(
         type='SECONDHead',
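The single-line change in each of the KITTI configs above and below renames the SECONDFPN neck argument num_upsample_filters to out_channels, matching the updated neck code later in this commit. As a rough illustration (not part of the diff, with a hypothetical check_secondfpn_cfg helper), the renamed key is still a per-scale list that has to line up with in_channels and upsample_strides:

# Minimal sketch (illustration only, not from this commit): the renamed
# `out_channels` key keeps one entry per backbone scale, so the three
# list-valued keys must have equal length. `check_secondfpn_cfg` is a
# hypothetical helper used just to show the expected structure.
neck_cfg = dict(
    type='SECONDFPN',
    in_channels=[64, 128, 256],
    upsample_strides=[1, 2, 4],
    out_channels=[128, 128, 128],
)


def check_secondfpn_cfg(cfg):
    """Verify that the per-scale lists of a SECONDFPN-style config line up."""
    lists = [cfg['in_channels'], cfg['upsample_strides'], cfg['out_channels']]
    assert len(set(len(x) for x in lists)) == 1, 'per-scale lists must match'
    return cfg


check_secondfpn_cfg(neck_cfg)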
configs/kitti/dv_second_secfpn_2x8_cosine_80e_kitti-3d-3class.py

@@ -32,7 +32,7 @@ model = dict(
         type='SECONDFPN',
         in_channels=[128, 256],
         upsample_strides=[1, 2],
-        num_upsample_filters=[256, 256],
+        out_channels=[256, 256],
     ),
     bbox_head=dict(
         type='SECONDHead',
configs/kitti/dv_second_secfpn_6x8_80e_kitti-3d-car.py

@@ -32,7 +32,7 @@ model = dict(
         type='SECONDFPN',
         in_channels=[128, 256],
         upsample_strides=[1, 2],
-        num_upsample_filters=[256, 256],
+        out_channels=[256, 256],
     ),
     bbox_head=dict(
         type='SECONDHead',
configs/kitti/hv_PartA2_secfpn_4x8_cosine_80e_kitti-3d-3class.py

@@ -27,7 +27,7 @@ model = dict(
         type='SECONDFPN',
         in_channels=[128, 256],
         upsample_strides=[1, 2],
-        num_upsample_filters=[256, 256]),
+        out_channels=[256, 256]),
     rpn_head=dict(
         type='PartA2RPNHead',
         class_name=['Pedestrian', 'Cyclist', 'Car'],
configs/kitti/hv_pointpillars_secfpn_6x8_160e_kitti-3d-car.py

@@ -33,7 +33,7 @@ model = dict(
         type='SECONDFPN',
         in_channels=[64, 128, 256],
         upsample_strides=[1, 2, 4],
-        num_upsample_filters=[128, 128, 128],
+        out_channels=[128, 128, 128],
     ),
     bbox_head=dict(
         type='SECONDHead',
configs/kitti/hv_second_secfpn_6x8_80e_kitti-3d-car.py

@@ -32,7 +32,7 @@ model = dict(
         type='SECONDFPN',
         in_channels=[128, 256],
         upsample_strides=[1, 2],
-        num_upsample_filters=[256, 256],
+        out_channels=[256, 256],
     ),
     bbox_head=dict(
         type='SECONDHead',
configs/nus/hv_pointpillars_fpn_sbn-all_4x8_2x_nus-3d.py  (new file, 0 → 100644)

# model settings
voxel_size = [0.25, 0.25, 8]
point_cloud_range = [-50, -50, -5, 50, 50, 3]
class_names = [
    'car', 'truck', 'trailer', 'bus', 'construction_vehicle', 'bicycle',
    'motorcycle', 'pedestrian', 'traffic_cone', 'barrier'
]
model = dict(
    type='MVXFasterRCNNV2',
    pts_voxel_layer=dict(
        max_num_points=64,  # max_points_per_voxel
        point_cloud_range=point_cloud_range,  # velodyne coordinates, x, y, z
        voxel_size=voxel_size,
        max_voxels=(30000, 40000),  # (training, testing) max_voxels
    ),
    pts_voxel_encoder=dict(
        type='HardVFE',
        num_input_features=4,
        num_filters=[64, 64],
        with_distance=False,
        voxel_size=voxel_size,
        with_cluster_center=True,
        with_voxel_center=True,
        point_cloud_range=point_cloud_range,
        norm_cfg=dict(type='naiveSyncBN1d', eps=1e-3, momentum=0.01)),
    pts_middle_encoder=dict(
        type='PointPillarsScatter',
        in_channels=64,
        output_shape=[400, 400],  # checked from PointCloud3D
    ),
    pts_backbone=dict(
        type='SECOND',
        in_channels=64,
        norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
        layer_nums=[3, 5, 5],
        layer_strides=[2, 2, 2],
        num_filters=[64, 128, 256],
    ),
    pts_neck=dict(
        type='FPN',
        norm_cfg=dict(type='naiveSyncBN2d', eps=1e-3, momentum=0.01),
        act_cfg=dict(type='ReLU'),
        in_channels=[64, 128, 256],
        out_channels=256,
        start_level=0,
        num_outs=3,
    ),
    pts_bbox_head=dict(
        type='Anchor3DVeloHead',
        class_names=class_names,
        num_classes=10,
        in_channels=256,
        feat_channels=256,
        use_direction_classifier=True,
        encode_bg_as_zeros=True,
        anchor_generator=dict(
            type='Anchor3DRangeGenerator',
            ranges=[[-50, -50, -1.80, 50, 50, -1.80]],
            strides=[1, 2, 4],
            sizes=[
                [0.577, 1.732, 1.],
                [1., 1., 1.],
                [0.4, 0.4, 1],
            ],
            custom_values=[0, 0],
            rotations=[0, 1.57],
            reshape_out=True),
        assigner_per_size=False,
        diff_rad_by_sin=True,
        dir_offset=0.7854,  # pi/4
        dir_limit_offset=0,
        bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),
        loss_dir=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2),
    ),
)
# model training and testing settings
train_cfg = dict(
    pts=dict(
        assigner=dict(  # for Car
            type='MaxIoUAssigner',
            iou_calculator=dict(type='BboxOverlapsNearest3D'),
            pos_iou_thr=0.6,
            neg_iou_thr=0.3,
            min_pos_iou=0.3,
            ignore_iof_thr=-1),
        allowed_border=0,
        code_weight=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.2, 0.2],
        pos_weight=-1,
        debug=False))
test_cfg = dict(
    pts=dict(
        use_rotate_nms=True,
        nms_across_levels=False,
        nms_pre=1000,
        nms_thr=0.2,
        score_thr=0.05,
        min_bbox_size=0,
        max_per_img=500,
        post_center_limit_range=point_cloud_range,
        # TODO: check whether need to change this
        # post_center_limit_range=[-59.6, -59.6, -6, 59.6, 59.6, 4],
        # soft-nms is also supported for rcnn testing
        # e.g., nms=dict(type='soft_nms', iou_thr=0.5, min_score=0.05)
    ))
# dataset settings
dataset_type = 'NuScenesDataset'
data_root = 'data/nuscenes/'
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
input_modality = dict(
    use_lidar=True,
    use_depth=False,
    use_lidar_intensity=True,
    use_camera=False,
)
db_sampler = dict(
    root_path=data_root,
    info_path=data_root + 'nuscenes_dbinfos_train.pkl',
    rate=1.0,
    use_road_plane=False,
    object_rot_range=[0.0, 0.0],
    prepare=dict(),
    sample_groups=dict(
        bus=4,
        trailer=4,
        truck=4,
    ),
)
train_pipeline = [
    dict(
        type='GlobalRotScale',
        rot_uniform_noise=[-0.3925, 0.3925],
        scaling_uniform_noise=[0.95, 1.05],
        trans_normal_noise=[0, 0, 0]),
    dict(type='RandomFlip3D', flip_ratio=0.5),
    dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='PointShuffle'),
    dict(type='DefaultFormatBundle3D', class_names=class_names),
    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d']),
]
test_pipeline = [
    dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='RandomFlip3D', flip_ratio=0),
    dict(
        type='DefaultFormatBundle3D',
        class_names=class_names,
        with_label=False),
    dict(type='Collect3D', keys=['points']),
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(
        type=dataset_type,
        root_path=data_root,
        ann_file=data_root + 'nuscenes_infos_train.pkl',
        pipeline=train_pipeline,
        modality=input_modality,
        class_names=class_names,
        with_label=True),
    val=dict(
        type=dataset_type,
        root_path=data_root,
        ann_file=data_root + 'nuscenes_infos_val.pkl',
        pipeline=test_pipeline,
        modality=input_modality,
        class_names=class_names,
        with_label=True),
    test=dict(
        type=dataset_type,
        root_path=data_root,
        ann_file=data_root + 'nuscenes_infos_val.pkl',
        pipeline=test_pipeline,
        modality=input_modality,
        class_names=class_names,
        with_label=False))
# optimizer
optimizer = dict(type='AdamW', lr=0.001, weight_decay=0.01)
# max_norm=10 is better for SECOND
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=1.0 / 1000,
    step=[20, 23])
momentum_config = None
checkpoint_config = dict(interval=1)
# yapf:disable
evaluation = dict(interval=20)
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 24
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/hv_pointpillars_secfpn_sbn-all_4x8_2x_nus-3d'
load_from = None
resume_from = None
workflow = [('train', 1)]
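For reference (not part of the committed file), the output_shape=[400, 400] given to PointPillarsScatter above follows directly from point_cloud_range and voxel_size: the 100 m x/y extent divided by the 0.25 m pillar footprint gives 400 cells per axis, and the single 8 m z voxel spans the full height range. A minimal sketch of that arithmetic:

# Minimal sketch (illustration only): derive the BEV grid that
# PointPillarsScatter's `output_shape` encodes from the config values above.
point_cloud_range = [-50, -50, -5, 50, 50, 3]  # [x_min, y_min, z_min, x_max, y_max, z_max]
voxel_size = [0.25, 0.25, 8]                   # pillar footprint in x/y, full height in z

nx = round((point_cloud_range[3] - point_cloud_range[0]) / voxel_size[0])  # 400
ny = round((point_cloud_range[4] - point_cloud_range[1]) / voxel_size[1])  # 400
nz = round((point_cloud_range[5] - point_cloud_range[2]) / voxel_size[2])  # 1 -> pillars

assert [ny, nx] == [400, 400] and nz == 1  # matches output_shape in the config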
mmdet3d/models/necks/second_fpn.py

+import copy
 import torch
 import torch.nn as nn
 from mmcv.cnn import (build_norm_layer, build_upsample_layer, constant_init,
...
@@ -36,19 +34,15 @@ class SECONDFPN(nn.Module):
         deblocks = []
         for i, out_channel in enumerate(out_channels):
-            upsample_layer = build_upsample_layer(
-                upsample_cfg,
-                in_channels=in_channels[i],
-                out_channels=out_channel,
-                padding=upsample_strides[i],
-                stride=upsample_strides[i])
-            deblock = nn.Sequential(upsample_layer,
-                                    build_norm_layer(norm_cfg, out_channel)[1],
-                                    nn.ReLU(inplace=True))
+            norm_layer = build_norm_layer(norm_cfg, out_channel)[1]
+            upsample_cfg_ = copy.deepcopy(upsample_cfg)
+            upsample_cfg_.update(
+                in_channels=in_channels[i],
+                out_channels=out_channel,
+                kernel_size=upsample_strides[i],
+                stride=upsample_strides[i])
+            upsample_layer = build_upsample_layer(upsample_cfg_)
+            deblock = nn.Sequential(
+                upsample_layer,
+                norm_layer,
+                nn.ReLU(inplace=True),
+            )
             deblocks.append(deblock)
         self.deblocks = nn.ModuleList(deblocks)
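Besides switching the transposed-convolution argument from padding to kernel_size, the rewrite above copies the shared upsample_cfg before injecting per-scale arguments, so one loop iteration no longer leaks its in_channels/out_channels/kernel_size into the next. A rough standalone sketch of the same pattern, using plain PyTorch layers in place of the mmcv builders (channel counts and feature-map sizes below are illustrative assumptions):

# Standalone sketch (assumption: plain PyTorch instead of mmcv's
# build_upsample_layer/build_norm_layer) of the per-scale deblock pattern.
import copy

import torch
import torch.nn as nn

base_upsample_cfg = dict(bias=False)  # shared template, reused for every scale
in_channels = [64, 128, 256]
out_channels = [128, 128, 128]
upsample_strides = [1, 2, 4]

deblocks = []
for i, out_channel in enumerate(out_channels):
    cfg_ = copy.deepcopy(base_upsample_cfg)  # per-scale copy keeps the template clean
    cfg_.update(
        in_channels=in_channels[i],
        out_channels=out_channel,
        kernel_size=upsample_strides[i],  # kernel == stride gives exact x-stride upsampling
        stride=upsample_strides[i])
    deblocks.append(
        nn.Sequential(
            nn.ConvTranspose2d(**cfg_),
            nn.BatchNorm2d(out_channel, eps=1e-3, momentum=0.01),
            nn.ReLU(inplace=True)))

# Multi-scale features (e.g. from a SECOND-style backbone) all end up at the
# same spatial resolution and can then be concatenated along the channel dim.
feats = [torch.rand(2, c, 248 // s, 216 // s) for c, s in zip(in_channels, upsample_strides)]
ups = [block(x) for block, x in zip(deblocks, feats)]
print([tuple(u.shape) for u in ups])  # three tensors of shape (2, 128, 248, 216)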
requirements/build.txt

 # These must be installed before building mmdetection
 numpy
-torch>=1.1
+torch>=1.3
requirements/runtime.txt

 matplotlib
-mmcv>=0.5.0
+mmcv>=0.5.1
 numba==0.45.1
 numpy
 # need older pillow until torchvision is fixed
...
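The requirement pins are bumped from torch>=1.1 to torch>=1.3 and from mmcv>=0.5.0 to mmcv>=0.5.1. A short snippet like the following (illustrative only; it assumes torch and mmcv are already importable in the current environment) can confirm an existing setup satisfies the new minimums:

# Sanity check (illustration only) that an environment meets the bumped pins.
from pkg_resources import parse_version

import mmcv
import torch

assert parse_version(torch.__version__.split('+')[0]) >= parse_version('1.3')
assert parse_version(mmcv.__version__) >= parse_version('0.5.1')
print('torch', torch.__version__, '| mmcv', mmcv.__version__)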