OpenDAS / mmdetection3d / Commits

Commit 7f7c73a9, authored Jun 17, 2020 by zhangwenwei

Merge branch 'fix_benchmark' into 'master'

Fix benchmark

See merge request open-mmlab/mmdet.3d!74

Parents: 235be017, e74768bf
Showing 2 changed files with 199 additions and 5 deletions:

configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py  +192 -0
tools/benchmark.py  +7 -5
configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py (new file, mode 100644)
# model settings
voxel_size = [0.16, 0.16, 4]
point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1]
model = dict(
    type='VoxelNet',
    voxel_layer=dict(
        max_num_points=64,
        point_cloud_range=point_cloud_range,
        voxel_size=voxel_size,
        max_voxels=(12000, 20000)),
    voxel_encoder=dict(
        type='PillarFeatureNet',
        in_channels=4,
        feat_channels=[64],
        with_distance=False,
        voxel_size=voxel_size,
        point_cloud_range=point_cloud_range),
    middle_encoder=dict(
        type='PointPillarsScatter', in_channels=64, output_shape=[496, 432]),
    backbone=dict(
        type='SECOND',
        in_channels=64,
        layer_nums=[3, 5, 5],
        layer_strides=[2, 2, 2],
        out_channels=[64, 128, 256]),
    neck=dict(
        type='SECONDFPN',
        in_channels=[64, 128, 256],
        upsample_strides=[1, 2, 4],
        out_channels=[128, 128, 128]),
    bbox_head=dict(
        type='Anchor3DHead',
        num_classes=1,
        in_channels=384,
        feat_channels=384,
        use_direction_classifier=True,
        anchor_generator=dict(
            type='Anchor3DRangeGenerator',
            ranges=[[0, -39.68, -1.78, 69.12, 39.68, -1.78]],
            sizes=[[1.6, 3.9, 1.56]],
            rotations=[0, 1.57],
            reshape_out=True),
        diff_rad_by_sin=True,
        bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0),
        loss_dir=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.2)))
# model training and testing settings
train_cfg = dict(
    assigner=dict(
        type='MaxIoUAssigner',
        iou_calculator=dict(type='BboxOverlapsNearest3D'),
        pos_iou_thr=0.6,
        neg_iou_thr=0.45,
        min_pos_iou=0.45,
        ignore_iof_thr=-1),
    allowed_border=0,
    pos_weight=-1,
    debug=False)
test_cfg = dict(
    use_rotate_nms=True,
    nms_across_levels=False,
    nms_thr=0.01,
    score_thr=0.1,
    min_bbox_size=0,
    nms_pre=100,
    max_num=50)
# dataset settings
dataset_type = 'KittiDataset'
data_root = 'data/kitti/'
class_names = ['Car']
input_modality = dict(use_lidar=True, use_camera=False)
db_sampler = dict(
    data_root=data_root,
    info_path=data_root + 'kitti_dbinfos_train.pkl',
    rate=1.0,
    object_rot_range=[0.0, 0.0],
    prepare=dict(filter_by_difficulty=[-1], filter_by_min_points=dict(Car=5)),
    sample_groups=dict(Car=15),
    classes=class_names)
train_pipeline = [
    dict(type='LoadPointsFromFile', load_dim=4, use_dim=4),
    dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
    dict(type='ObjectSample', db_sampler=db_sampler),
    dict(
        type='ObjectNoise',
        num_try=100,
        loc_noise_std=[0.25, 0.25, 0.25],
        global_rot_range=[0.0, 0.0],
        rot_uniform_noise=[-0.15707963267, 0.15707963267]),
    dict(type='RandomFlip3D', flip_ratio=0.5),
    dict(
        type='GlobalRotScale',
        rot_uniform_noise=[-0.78539816, 0.78539816],
        scaling_uniform_noise=[0.95, 1.05]),
    dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
    dict(type='PointShuffle'),
    dict(type='DefaultFormatBundle3D', class_names=class_names),
    dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
test_pipeline = [
    dict(type='LoadPointsFromFile', load_dim=4, use_dim=4),
    dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
    dict(
        type='DefaultFormatBundle3D',
        class_names=class_names,
        with_label=False),
    dict(type='Collect3D', keys=['points'])
]
data = dict(
    samples_per_gpu=3,
    workers_per_gpu=3,
    train=dict(
        type='RepeatDataset',
        times=2,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            ann_file=data_root + 'kitti_infos_train.pkl',
            split='training',
            pts_prefix='velodyne_reduced',
            pipeline=train_pipeline,
            modality=input_modality,
            classes=class_names,
            test_mode=False)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=data_root + 'kitti_infos_val.pkl',
        split='training',
        pts_prefix='velodyne_reduced',
        pipeline=test_pipeline,
        modality=input_modality,
        classes=class_names,
        test_mode=True),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        ann_file=data_root + 'kitti_infos_val.pkl',
        split='training',
        pts_prefix='velodyne_reduced',
        pipeline=test_pipeline,
        modality=input_modality,
        classes=class_names,
        test_mode=True))
# optimizer
lr = 0.001  # max learning rate
optimizer = dict(
    type='AdamW',
    lr=lr,
    betas=(0.95, 0.99),  # the momentum is changed during training
    weight_decay=0.01)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='cyclic',
    target_ratio=(10, 1e-4),
    cyclic_times=1,
    step_ratio_up=0.4)
momentum_config = dict(
    policy='cyclic',
    target_ratio=(0.85 / 0.95, 1),
    cyclic_times=1,
    step_ratio_up=0.4)
checkpoint_config = dict(interval=1)
evaluation = dict(interval=1)
# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 50
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/pp_secfpn_100e'
load_from = None
resume_from = None
workflow = [('train', 50)]
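As a quick consistency check on the voxel grid implied by the settings above, here is a minimal sketch, assuming mmcv is installed and the script is run from the repository root; it loads the config added in this commit with mmcv's Config.fromfile and verifies that point_cloud_range and voxel_size agree with the PointPillarsScatter output_shape. The snippet is illustrative only and not part of the commit.

# Minimal sketch: load the benchmark config and check the pillar grid shape.
# Assumes mmcv is installed and the repo root is the working directory.
from mmcv import Config

cfg = Config.fromfile(
    'configs/benchmark/hv_pointpillars_secfpn_3x8_100e_det3d_kitti-3d-car.py')

x_min, y_min, _, x_max, y_max, _ = cfg.point_cloud_range
vx, vy, _ = cfg.voxel_size

# Grid size along x and y, rounded to avoid floating-point noise.
grid_x = round((x_max - x_min) / vx)
grid_y = round((y_max - y_min) / vy)

# The config's output_shape is [496, 432], i.e. [grid_y, grid_x].
assert cfg.model['middle_encoder']['output_shape'] == [grid_y, grid_x]
print(f'BEV grid: {grid_y} x {grid_x}')

The arithmetic behind the check: the x extent (69.12 - 0) / 0.16 gives 432 pillars and the y extent (39.68 - (-39.68)) / 0.16 gives 496 pillars, which is why output_shape is [496, 432].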
tools/benchmark.py
@@ -7,15 +7,16 @@ from mmcv.parallel import MMDataParallel
 from mmcv.runner import load_checkpoint
 from tools.fuse_conv_bn import fuse_module

+from mmdet3d.datasets import build_dataloader, build_dataset
+from mmdet3d.models import build_detector
 from mmdet.core import wrap_fp16_model
-from mmdet.datasets import build_dataloader, build_dataset
-from mmdet.models import build_detector


 def parse_args():
     parser = argparse.ArgumentParser(description='MMDet benchmark a model')
     parser.add_argument('config', help='test config file path')
     parser.add_argument('checkpoint', help='checkpoint file')
+    parser.add_argument('--samples', default=2000, help='samples to benchmark')
     parser.add_argument(
         '--log-interval', default=50, help='interval of logging')
     parser.add_argument(

@@ -64,7 +65,7 @@ def main():
     num_warmup = 5
     pure_inf_time = 0

-    # benchmark with 2000 image and take the average
+    # benchmark with several samples and take the average
     for i, data in enumerate(data_loader):

         torch.cuda.synchronize()

@@ -80,9 +81,10 @@ def main():
             pure_inf_time += elapsed
             if (i + 1) % args.log_interval == 0:
                 fps = (i + 1 - num_warmup) / pure_inf_time
-                print(f'Done image [{i + 1:<3}/ 2000], fps: {fps:.1f} img / s')
+                print(f'Done image [{i + 1:<3}/ {args.samples}], '
+                      f'fps: {fps:.1f} img / s')

-        if (i + 1) == 2000:
+        if (i + 1) == args.samples:
             pure_inf_time += elapsed
             fps = (i + 1 - num_warmup) / pure_inf_time
             print(f'Overall fps: {fps:.1f} img / s')
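The change above replaces the hard-coded 2000-iteration limit with the new --samples argument and switches the dataloader/model builders to the mmdet3d ones; the FPS bookkeeping itself is untouched. Below is a self-contained sketch of that bookkeeping, with a sleep standing in for model inference, to show how the warmup iterations are excluded from the average. The function name, defaults, and the sleep are illustrative, not from the commit.

import time


def fake_benchmark(samples=200, log_interval=50, num_warmup=5):
    """Illustrative stand-in for the timing loop in tools/benchmark.py."""
    pure_inf_time = 0
    for i in range(samples):
        start_time = time.perf_counter()
        time.sleep(0.001)  # placeholder for one forward pass
        elapsed = time.perf_counter() - start_time

        # Skip the first few iterations: they are typically slower
        # (e.g. CUDA initialization, data-loader spin-up).
        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(f'Done image [{i + 1:<3}/ {samples}], '
                      f'fps: {fps:.1f} img / s')

        # Stop after `samples` iterations (previously hard-coded to 2000)
        # and report the overall average.
        if (i + 1) == samples:
            fps = (i + 1 - num_warmup) / pure_inf_time
            print(f'Overall fps: {fps:.1f} img / s')
            return fps


fake_benchmark()

With the new flag, an invocation would presumably look like `python tools/benchmark.py CONFIG CHECKPOINT --samples 500 --log-interval 50`, where CONFIG and CHECKPOINT are placeholders for the positional arguments shown in the diff.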