Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
mmdetection3d
Commits
8239416f
Commit
8239416f
authored
Nov 13, 2021
by
Tai-Wang
Browse files
Merge branch 'master' into v1.0.0.dev0
parents
a46ee4f3
0cd000bf
Changes
28
Hide whitespace changes
Inline
Side-by-side
Showing
8 changed files
with
183 additions
and
7 deletions
+183
-7
docs_zh-CN/tutorials/customize_runtime.md
docs_zh-CN/tutorials/customize_runtime.md
+1
-1
docs_zh-CN/useful_tools.md
docs_zh-CN/useful_tools.md
+1
-1
mmdet3d/apis/inference.py
mmdet3d/apis/inference.py
+1
-1
mmdet3d/core/visualizer/show_result.py
mmdet3d/core/visualizer/show_result.py
+1
-1
mmdet3d/datasets/nuscenes_mono_dataset.py
mmdet3d/datasets/nuscenes_mono_dataset.py
+1
-1
mmdet3d/version.py
mmdet3d/version.py
+1
-1
tools/create_data.py
tools/create_data.py
+1
-1
tools/model_converters/convert_h3dnet_checkpoints.py
tools/model_converters/convert_h3dnet_checkpoints.py
+176
-0
No files found.
docs_zh-CN/tutorials/customize_runtime.md
View file @
8239416f
...
...
@@ -121,7 +121,7 @@ class MyOptimizerConstructor(object):
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
```
如果您的配置继承了一个已经设置了 `optimizer_config` 的基础配置,那么您可能需要 `_delete_=True` 字段来覆盖基础配置中无用的设置。详见配置文件的[说明文档](https://mmdetection.readthedocs.io/
en
/latest/tutorials/config.html)。
如果您的配置继承了一个已经设置了 `optimizer_config` 的基础配置,那么您可能需要 `_delete_=True` 字段来覆盖基础配置中无用的设置。详见配置文件的[说明文档](https://mmdetection.readthedocs.io/
zh_CN
/latest/tutorials/config.html)。
-
__使用动量规划器 (momentum scheduler) 来加速模型收敛__:
...
...
docs_zh-CN/useful_tools.md
View file @
8239416f
...
...
@@ -266,7 +266,7 @@ python -u tools/data_converter/nuimage_converter.py --data-root ${DATA_ROOT} --v
- `--nproc`: 数据准备的进程数,默认为 `4`。由于图片是并行处理的,更大的进程数目能够减少准备时间。
- `--extra-tag`: 注释的额外标签,默认为 `nuimages`。这可用于将不同时间处理的不同注释分开以供研究。
更多的数据准备细节参考 [doc](https://mmdetection3d.readthedocs.io/
en
/latest/data_preparation.html),nuImages 数据集的细节参考 [README](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/nuimages/README.md/)。
更多的数据准备细节参考 [doc](https://mmdetection3d.readthedocs.io/
zh_CN
/latest/data_preparation.html),nuImages 数据集的细节参考 [README](https://github.com/open-mmlab/mmdetection3d/blob/master/configs/nuimages/README.md/)。
 
...
...
mmdet3d/apis/inference.py
View file @
8239416f
...
...
@@ -57,7 +57,7 @@ def init_model(config, checkpoint=None, device='cuda:0'):
config
.
model
.
train_cfg
=
None
model
=
build_model
(
config
.
model
,
test_cfg
=
config
.
get
(
'test_cfg'
))
if
checkpoint
is
not
None
:
checkpoint
=
load_checkpoint
(
model
,
checkpoint
)
checkpoint
=
load_checkpoint
(
model
,
checkpoint
,
map_location
=
'cpu'
)
if
'CLASSES'
in
checkpoint
[
'meta'
]:
model
.
CLASSES
=
checkpoint
[
'meta'
][
'CLASSES'
]
else
:
...
...
mmdet3d/core/visualizer/show_result.py
View file @
8239416f
...
...
@@ -77,7 +77,7 @@ def show_result(points,
pred_bboxes
,
out_dir
,
filename
,
show
=
Tru
e
,
show
=
Fals
e
,
snapshot
=
False
):
"""Convert results into format that is directly readable for meshlab.
...
...
mmdet3d/datasets/nuscenes_mono_dataset.py
View file @
8239416f
...
...
@@ -781,7 +781,7 @@ def nusc_box_to_cam_box3d(boxes):
dims
=
torch
.
Tensor
([
b
.
wlh
for
b
in
boxes
]).
view
(
-
1
,
3
)
rots
=
torch
.
Tensor
([
b
.
orientation
.
yaw_pitch_roll
[
0
]
for
b
in
boxes
]).
view
(
-
1
,
1
)
velocity
=
torch
.
Tensor
([
b
.
velocity
[:
2
]
for
b
in
boxes
]).
view
(
-
1
,
2
)
velocity
=
torch
.
Tensor
([
b
.
velocity
[
0
:
:
2
]
for
b
in
boxes
]).
view
(
-
1
,
2
)
# convert nusbox to cambox convention
dims
[:,
[
0
,
1
,
2
]]
=
dims
[:,
[
1
,
2
,
0
]]
...
...
mmdet3d/version.py
View file @
8239416f
# Copyright (c) Open-MMLab. All rights reserved.
__version__
=
'0.17.
1
'
__version__
=
'0.17.
2
'
short_version
=
__version__
...
...
tools/create_data.py
View file @
8239416f
...
...
@@ -171,7 +171,7 @@ def waymo_data_prep(root_path,
save_dir
,
prefix
=
str
(
i
),
workers
=
workers
,
test_mode
=
(
split
==
'test'
))
test_mode
=
(
split
==
'test
ing
'
))
converter
.
convert
()
# Generate waymo infos
out_dir
=
osp
.
join
(
out_dir
,
'kitti_format'
)
...
...
tools/model_converters/convert_h3dnet_checkpoints.py
0 → 100644
View file @
8239416f
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import tempfile

import torch
from mmcv import Config
from mmcv.runner import load_state_dict

from mmdet3d.models import build_detector
def parse_args():
    """Parse command line arguments for the checkpoint converter.

    Returns:
        argparse.Namespace: Parsed arguments with two attributes:
            - checkpoint (str): path of the input checkpoint file.
            - out (str): path of the output checkpoint file.
    """
    parser = argparse.ArgumentParser(
        description='MMDet3D upgrade model version(before v0.6.0) of H3DNet')
    parser.add_argument('checkpoint', help='checkpoint file')
    # The converted checkpoint is unconditionally saved to this path, so
    # require it up front instead of failing late inside torch.save.
    parser.add_argument(
        '--out', required=True, help='path of the output checkpoint file')
    args = parser.parse_args()
    return args
def parse_config(config_strings):
    """Parse config from strings and upgrade legacy H3DNet config fields.

    Args:
        config_strings (str): String contents of the model config stored in
            the checkpoint's meta information.

    Returns:
        Config: Model config with pre-v0.6.0 fields renamed/filled in to
        match the current schema.
    """
    # Config.fromfile only loads from a real .py file, so dump the string
    # to a temporary module first.
    temp_file = tempfile.NamedTemporaryFile()
    config_path = f'{temp_file.name}.py'
    with open(config_path, 'w') as f:
        f.write(config_strings)

    try:
        config = Config.fromfile(config_path)
    finally:
        # The '.py' file is created *next to* the NamedTemporaryFile and is
        # not removed by temp_file.close(); delete it explicitly so each run
        # does not leak a file into the temp directory.
        os.remove(config_path)
        temp_file.close()

    # Update backbone config: 'pool_mod' moved into 'sa_cfg'.
    if 'pool_mod' in config.model.backbone.backbones:
        config.model.backbone.backbones.pop('pool_mod')

    if 'sa_cfg' not in config.model.backbone:
        config.model.backbone['sa_cfg'] = dict(
            type='PointSAModule',
            pool_mod='max',
            use_xyz=True,
            normalize_xyz=True)

    if 'type' not in config.model.rpn_head.vote_aggregation_cfg:
        config.model.rpn_head.vote_aggregation_cfg['type'] = 'PointSAModule'

    # Update rpn_head config: prediction layers got an explicit cfg.
    if 'pred_layer_cfg' not in config.model.rpn_head:
        config.model.rpn_head['pred_layer_cfg'] = dict(
            in_channels=128, shared_conv_channels=(128, 128), bias=True)

    if 'feat_channels' in config.model.rpn_head:
        config.model.rpn_head.pop('feat_channels')

    # 'vote_moudule_cfg' is the old (misspelled) key; rename, don't drop.
    if 'vote_moudule_cfg' in config.model.rpn_head:
        config.model.rpn_head['vote_module_cfg'] = config.model.rpn_head.pop(
            'vote_moudule_cfg')

    # use_xyz means the 3 coordinate channels are appended inside the module,
    # so the declared input width must shrink by 3.
    if config.model.rpn_head.vote_aggregation_cfg.use_xyz:
        config.model.rpn_head.vote_aggregation_cfg.mlp_channels[0] -= 3

    for cfg in config.model.roi_head.primitive_list:
        cfg['vote_module_cfg'] = cfg.pop('vote_moudule_cfg')
        cfg.vote_aggregation_cfg.mlp_channels[0] -= 3
        if 'type' not in cfg.vote_aggregation_cfg:
            cfg.vote_aggregation_cfg['type'] = 'PointSAModule'

    # NOTE(review): 'suface_matching_cfg' is misspelled but presumably
    # matches the key actually used by H3DNet configs — do not "fix" it here.
    if 'type' not in config.model.roi_head.bbox_head.suface_matching_cfg:
        config.model.roi_head.bbox_head.suface_matching_cfg[
            'type'] = 'PointSAModule'

    if config.model.roi_head.bbox_head.suface_matching_cfg.use_xyz:
        config.model.roi_head.bbox_head.suface_matching_cfg.mlp_channels[
            0] -= 3

    if 'type' not in config.model.roi_head.bbox_head.line_matching_cfg:
        config.model.roi_head.bbox_head.line_matching_cfg[
            'type'] = 'PointSAModule'

    if config.model.roi_head.bbox_head.line_matching_cfg.use_xyz:
        config.model.roi_head.bbox_head.line_matching_cfg.mlp_channels[0] -= 3

    if 'proposal_module_cfg' in config.model.roi_head.bbox_head:
        config.model.roi_head.bbox_head.pop('proposal_module_cfg')

    return config
def main():
    """Convert keys in checkpoints for H3DNet.

    There can be some breaking changes during the development of mmdetection3d,
    and this tool is used for upgrading checkpoints trained with old versions
    (before v0.6.0) to the latest one.
    """
    args = parse_args()
    # map_location='cpu' lets the converter run on machines without the GPU
    # the checkpoint was saved from; downstream copy/cat/save all work on CPU.
    checkpoint = torch.load(args.checkpoint, map_location='cpu')
    cfg = parse_config(checkpoint['meta']['config'])
    # Build the model so the converted state dict can be validated at the end.
    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    orig_ckpt = checkpoint['state_dict']
    converted_ckpt = orig_ckpt.copy()

    # The class count decides how wide the fused conv_out slices are below.
    if cfg['dataset_type'] == 'ScanNetDataset':
        NUM_CLASSES = 18
    elif cfg['dataset_type'] == 'SUNRGBDDataset':
        NUM_CLASSES = 10
    else:
        raise NotImplementedError

    # Old prefix -> new prefix for the shared prediction convs.
    RENAME_PREFIX = {
        'rpn_head.conv_pred.0': 'rpn_head.conv_pred.shared_convs.layer0',
        'rpn_head.conv_pred.1': 'rpn_head.conv_pred.shared_convs.layer1'
    }

    DEL_KEYS = [
        'rpn_head.conv_pred.0.bn.num_batches_tracked',
        'rpn_head.conv_pred.1.bn.num_batches_tracked'
    ]

    # new key -> (old fused key, list of (start, end) channel slices);
    # end == -1 is a sentinel meaning "to the end of the tensor".
    EXTRACT_KEYS = {
        'rpn_head.conv_pred.conv_cls.weight':
        ('rpn_head.conv_pred.conv_out.weight', [(0, 2), (-NUM_CLASSES, -1)]),
        'rpn_head.conv_pred.conv_cls.bias':
        ('rpn_head.conv_pred.conv_out.bias', [(0, 2), (-NUM_CLASSES, -1)]),
        'rpn_head.conv_pred.conv_reg.weight':
        ('rpn_head.conv_pred.conv_out.weight', [(2, -NUM_CLASSES)]),
        'rpn_head.conv_pred.conv_reg.bias':
        ('rpn_head.conv_pred.conv_out.bias', [(2, -NUM_CLASSES)])
    }

    # Delete some useless keys
    for key in DEL_KEYS:
        converted_ckpt.pop(key)

    # Rename keys with specific prefix
    RENAME_KEYS = dict()
    for old_key in converted_ckpt.keys():
        for rename_prefix in RENAME_PREFIX.keys():
            if rename_prefix in old_key:
                new_key = old_key.replace(rename_prefix,
                                          RENAME_PREFIX[rename_prefix])
                RENAME_KEYS[new_key] = old_key
    for new_key, old_key in RENAME_KEYS.items():
        converted_ckpt[new_key] = converted_ckpt.pop(old_key)

    # Extract weights and rename the keys
    for new_key, (old_key, indices) in EXTRACT_KEYS.items():
        cur_layers = orig_ckpt[old_key]
        converted_layers = []
        for (start, end) in indices:
            if end != -1:
                converted_layers.append(cur_layers[start:end])
            else:
                converted_layers.append(cur_layers[start:])
        converted_layers = torch.cat(converted_layers, 0)
        converted_ckpt[new_key] = converted_layers
        # Drop the fused source key once its slices have been extracted.
        if old_key in converted_ckpt.keys():
            converted_ckpt.pop(old_key)

    # Check the converted checkpoint by loading it into the freshly built
    # model; strict=True makes any key/shape mismatch fail loudly here.
    load_state_dict(model, converted_ckpt, strict=True)
    checkpoint['state_dict'] = converted_ckpt
    torch.save(checkpoint, args.out)
# Entry point: run the conversion only when executed as a script.
if __name__ == '__main__':
    main()
Prev
1
2
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment