Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
dcnv3
Commits
305e110f
Commit
305e110f
authored
Jul 18, 2023
by
yeshenglong1
Browse files
update tools
parent
631a5159
Changes
37
Show whitespace changes
Inline
Side-by-side
Showing
17 changed files
with
1692 additions
and
0 deletions
+1692
-0
autonomous_driving/openlane-v2/tools/deployment/test_torchserver.py
..._driving/openlane-v2/tools/deployment/test_torchserver.py
+56
-0
autonomous_driving/openlane-v2/tools/dist_test.sh
autonomous_driving/openlane-v2/tools/dist_test.sh
+22
-0
autonomous_driving/openlane-v2/tools/dist_train.sh
autonomous_driving/openlane-v2/tools/dist_train.sh
+20
-0
autonomous_driving/openlane-v2/tools/misc/browse_dataset.py
autonomous_driving/openlane-v2/tools/misc/browse_dataset.py
+232
-0
autonomous_driving/openlane-v2/tools/misc/fuse_conv_bn.py
autonomous_driving/openlane-v2/tools/misc/fuse_conv_bn.py
+68
-0
autonomous_driving/openlane-v2/tools/misc/print_config.py
autonomous_driving/openlane-v2/tools/misc/print_config.py
+27
-0
autonomous_driving/openlane-v2/tools/misc/visualize_results.py
...omous_driving/openlane-v2/tools/misc/visualize_results.py
+50
-0
autonomous_driving/openlane-v2/tools/model_converters/convert_h3dnet_checkpoints.py
...e-v2/tools/model_converters/convert_h3dnet_checkpoints.py
+177
-0
autonomous_driving/openlane-v2/tools/model_converters/convert_votenet_checkpoints.py
...-v2/tools/model_converters/convert_votenet_checkpoints.py
+153
-0
autonomous_driving/openlane-v2/tools/model_converters/publish_model.py
...iving/openlane-v2/tools/model_converters/publish_model.py
+36
-0
autonomous_driving/openlane-v2/tools/model_converters/regnet2mmdet.py
...riving/openlane-v2/tools/model_converters/regnet2mmdet.py
+90
-0
autonomous_driving/openlane-v2/tools/slurm_test.sh
autonomous_driving/openlane-v2/tools/slurm_test.sh
+24
-0
autonomous_driving/openlane-v2/tools/slurm_train.sh
autonomous_driving/openlane-v2/tools/slurm_train.sh
+24
-0
autonomous_driving/openlane-v2/tools/test.py
autonomous_driving/openlane-v2/tools/test.py
+260
-0
autonomous_driving/openlane-v2/tools/train.py
autonomous_driving/openlane-v2/tools/train.py
+263
-0
autonomous_driving/openlane-v2/tools/update_data_coords.py
autonomous_driving/openlane-v2/tools/update_data_coords.py
+168
-0
autonomous_driving/openlane-v2/tools/update_data_coords.sh
autonomous_driving/openlane-v2/tools/update_data_coords.sh
+22
-0
No files found.
autonomous_driving/openlane-v2/tools/deployment/test_torchserver.py
0 → 100644
View file @
305e110f
from
argparse
import
ArgumentParser
import
numpy
as
np
import
requests
from
mmdet3d.apis
import
inference_detector
,
init_model
def parse_args():
    """Parse command-line options for the torchserve comparison test."""
    parser = ArgumentParser()
    # Positional arguments: input cloud, local model spec, served model name.
    for arg_name, arg_help in (
            ('pcd', 'Point cloud file'),
            ('config', 'Config file'),
            ('checkpoint', 'Checkpoint file'),
            ('model_name', 'The model name in the server')):
        parser.add_argument(arg_name, help=arg_help)
    # Optional arguments.
    parser.add_argument(
        '--inference-addr',
        default='127.0.0.1:8080',
        help='Address and port of the inference server')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--score-thr',
        type=float,
        default=0.5,
        help='3d bbox score threshold')
    return parser.parse_args()
def parse_result(input):
    """Extract the predicted boxes from a torchserve JSON response.

    Args:
        input: Decoded JSON response; a list whose first element is a
            dict carrying a '3dbbox' entry.

    Returns:
        np.ndarray: The bounding boxes as a numpy array.
    """
    return np.array(input[0]['3dbbox'])
def main(args):
    """Compare local inference against a deployed torchserve endpoint.

    Runs the detector locally on ``args.pcd``, keeps predictions whose
    score exceeds ``args.score_thr``, posts the same point cloud to the
    inference server, and asserts both results match.

    Args:
        args (argparse.Namespace): Parsed CLI options from ``parse_args``.
    """
    # build the model from a config file and a checkpoint file
    model = init_model(args.config, args.checkpoint, device=args.device)
    # test a single point cloud file
    model_result, _ = inference_detector(model, args.pcd)
    # Multi-modality results nest the boxes under 'pts_bbox'.
    if 'pts_bbox' in model_result[0].keys():
        pred_bboxes = model_result[0]['pts_bbox']['boxes_3d'].tensor.numpy()
        pred_scores = model_result[0]['pts_bbox']['scores_3d'].numpy()
    else:
        pred_bboxes = model_result[0]['boxes_3d'].tensor.numpy()
        pred_scores = model_result[0]['scores_3d'].numpy()
    # Filter by the CLI threshold. BUGFIX: this was hard-coded to 0.5,
    # silently ignoring the --score-thr option.
    model_result = pred_bboxes[pred_scores > args.score_thr]

    url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
    with open(args.pcd, 'rb') as points:
        response = requests.post(url, points)
    server_result = parse_result(response.json())
    assert np.allclose(model_result, server_result)


if __name__ == '__main__':
    args = parse_args()
    main(args)
autonomous_driving/openlane-v2/tools/dist_test.sh
0 → 100644
View file @
305e110f
#!/usr/bin/env bash
# Distributed testing launcher.
# Usage: ./dist_test.sh CONFIG CHECKPOINT GPUS [extra test.py args...]
# Environment overrides: NNODES, NODE_RANK, PORT, MASTER_ADDR.

CONFIG=$1
CHECKPOINT=$2
GPUS=$3
# Multi-node settings; defaults assume a single local node.
NNODES=${NNODES:-1}
NODE_RANK=${NODE_RANK:-0}
PORT=${PORT:-29500}
MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}

# Put the repo root on PYTHONPATH so tools/test.py can import the package,
# then launch one process per GPU via torch.distributed.launch.
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch \
    --nnodes=$NNODES \
    --node_rank=$NODE_RANK \
    --master_addr=$MASTER_ADDR \
    --nproc_per_node=$GPUS \
    --master_port=$PORT \
    $(dirname "$0")/test.py \
    $CONFIG \
    $CHECKPOINT \
    --launcher pytorch \
    ${@:4}
autonomous_driving/openlane-v2/tools/dist_train.sh
0 → 100644
View file @
305e110f
#!/usr/bin/env bash
# Distributed training launcher.
# Usage: ./dist_train.sh CONFIG GPUS [extra train.py args...]
# Environment overrides: NNODES, NODE_RANK, PORT, MASTER_ADDR.

CONFIG=$1
GPUS=$2
# Multi-node settings; defaults assume a single local node.
NNODES=${NNODES:-1}
NODE_RANK=${NODE_RANK:-0}
PORT=${PORT:-29500}
MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}

# Put the repo root on PYTHONPATH so tools/train.py can import the package,
# then launch one process per GPU. `--seed 0` pins the RNG for reproducibility.
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch \
    --nnodes=$NNODES \
    --node_rank=$NODE_RANK \
    --master_addr=$MASTER_ADDR \
    --nproc_per_node=$GPUS \
    --master_port=$PORT \
    $(dirname "$0")/train.py \
    $CONFIG \
    --seed 0 \
    --launcher pytorch ${@:3}
autonomous_driving/openlane-v2/tools/misc/browse_dataset.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
import
warnings
from
os
import
path
as
osp
from
pathlib
import
Path
import
mmcv
import
numpy
as
np
from
mmcv
import
Config
,
DictAction
,
mkdir_or_exist
from
mmdet3d.core.bbox
import
(
Box3DMode
,
CameraInstance3DBoxes
,
Coord3DMode
,
DepthInstance3DBoxes
,
LiDARInstance3DBoxes
)
from
mmdet3d.core.visualizer
import
(
show_multi_modality_result
,
show_result
,
show_seg_result
)
from
mmdet3d.datasets
import
build_dataset
def parse_args():
    """Parse command-line options for browsing a dataset."""
    parser = argparse.ArgumentParser(description='Browse a dataset')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--skip-type',
        type=str,
        nargs='+',
        default=['Normalize'],
        help='skip some useless pipeline')
    parser.add_argument(
        '--output-dir',
        default=None,
        type=str,
        help='If there is no display interface, you can save it')
    parser.add_argument(
        '--task',
        type=str,
        choices=['det', 'seg', 'multi_modality-det', 'mono-det'],
        help='Determine the visualization method depending on the task.')
    parser.add_argument(
        '--aug',
        action='store_true',
        help='Whether to visualize augmented datasets or original dataset.')
    parser.add_argument(
        '--online',
        action='store_true',
        help='Whether to perform online visualization. Note that you often '
        'need a monitor to do so.')
    # Key=value overrides merged into the loaded config.
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    return parser.parse_args()
def build_data_cfg(config_path, skip_type, aug, cfg_options):
    """Build data config for loading visualization data.

    Args:
        config_path (str): Path of the config file to load.
        skip_type (list[str]): Pipeline step types to drop (e.g. 'Normalize').
        aug (bool): If True visualize the augmented (train) pipeline,
            otherwise the eval pipeline plus annotation loading.
        cfg_options (dict | None): Key-value overrides merged into the config.

    Returns:
        Config: the loaded config with `cfg.data.train.pipeline` rewritten.
    """
    cfg = Config.fromfile(config_path)
    if cfg_options is not None:
        cfg.merge_from_dict(cfg_options)
    # extract inner dataset of `RepeatDataset` as `cfg.data.train`
    # so we don't need to worry about it later
    if cfg.data.train['type'] == 'RepeatDataset':
        cfg.data.train = cfg.data.train.dataset
    # use only first dataset for `ConcatDataset`
    if cfg.data.train['type'] == 'ConcatDataset':
        cfg.data.train = cfg.data.train.datasets[0]
    train_data_cfg = cfg.data.train

    if aug:
        # NOTE: this aliases cfg.train_pipeline; the filtered copy is
        # assigned below, so the alias itself is not mutated here.
        show_pipeline = cfg.train_pipeline
    else:
        show_pipeline = cfg.eval_pipeline
        # Splice annotation-loading and Collect3D steps from the train
        # pipeline into the eval pipeline so GT can be visualized.
        for i in range(len(cfg.train_pipeline)):
            if cfg.train_pipeline[i]['type'] == 'LoadAnnotations3D':
                show_pipeline.insert(i, cfg.train_pipeline[i])
            # Collect points as well as labels
            if cfg.train_pipeline[i]['type'] == 'Collect3D':
                if show_pipeline[-1]['type'] == 'Collect3D':
                    # replace the eval Collect3D with the richer train one
                    show_pipeline[-1] = cfg.train_pipeline[i]
                else:
                    show_pipeline.append(cfg.train_pipeline[i])

    # Drop the steps the caller asked to skip (e.g. Normalize).
    train_data_cfg['pipeline'] = [
        x for x in show_pipeline if x['type'] not in skip_type
    ]

    return cfg
def to_depth_mode(points, bboxes):
    """Convert points and bboxes to Depth Coord and Depth Box mode.

    Either argument may be ``None``, in which case it is returned
    unchanged. Inputs are copied/cloned, so the originals are untouched.
    """
    converted_points = points
    converted_boxes = bboxes
    if converted_points is not None:
        converted_points = Coord3DMode.convert_point(
            converted_points.copy(), Coord3DMode.LIDAR, Coord3DMode.DEPTH)
    if converted_boxes is not None:
        converted_boxes = Box3DMode.convert(
            converted_boxes.clone(), Box3DMode.LIDAR, Box3DMode.DEPTH)
    return converted_points, converted_boxes
def show_det_data(input, out_dir, show=False):
    """Visualize 3D point cloud and 3D bboxes.

    Args:
        input (dict): A single loaded dataset sample.
        out_dir (str): Directory to save the snapshot to.
        show (bool): Whether to also open an interactive viewer.
    """
    metas = input['img_metas']._data
    cloud = input['points']._data.numpy()
    boxes = input['gt_bboxes_3d']._data.tensor
    # Visualization works in depth coordinates; convert if needed.
    if metas['box_mode_3d'] != Box3DMode.DEPTH:
        cloud, boxes = to_depth_mode(cloud, boxes)
    fname = osp.splitext(osp.basename(metas['pts_filename']))[0]
    show_result(
        cloud,
        boxes.clone(),
        None,
        out_dir,
        fname,
        show=show,
        snapshot=True)
def show_seg_data(input, out_dir, show=False):
    """Visualize 3D point cloud and segmentation mask.

    Args:
        input (dict): A single loaded dataset sample.
        out_dir (str): Directory to save the snapshot to.
        show (bool): Whether to also open an interactive viewer.
    """
    metas = input['img_metas']._data
    cloud = input['points']._data.numpy()
    seg_labels = input['pts_semantic_mask']._data.numpy()
    fname = osp.splitext(osp.basename(metas['pts_filename']))[0]
    show_seg_result(
        cloud,
        seg_labels.copy(),
        None,
        out_dir,
        fname,
        np.array(metas['PALETTE']),
        metas['ignore_index'],
        show=show,
        snapshot=True)
def show_proj_bbox_img(input, out_dir, show=False, is_nus_mono=False):
    """Visualize 3D bboxes on 2D image by projection.

    The projection matrix is chosen from the box type: depth boxes need
    none, lidar boxes use 'lidar2img', camera boxes use 'cam2img'.

    Args:
        input (dict): A single loaded dataset sample.
        out_dir (str): Directory to save results to.
        show (bool): Whether to also open an interactive viewer.
        is_nus_mono (bool): Currently unused in this body — kept for
            interface compatibility; TODO confirm it can be dropped.
    """
    gt_bboxes = input['gt_bboxes_3d']._data
    img_metas = input['img_metas']._data
    img = input['img']._data.numpy()
    # need to transpose channel to first dim
    img = img.transpose(1, 2, 0)
    # no 3D gt bboxes, just show img
    if gt_bboxes.tensor.shape[0] == 0:
        gt_bboxes = None
    filename = Path(img_metas['filename']).name
    if isinstance(gt_bboxes, DepthInstance3DBoxes):
        show_multi_modality_result(
            img,
            gt_bboxes,
            None,
            None,
            out_dir,
            filename,
            box_mode='depth',
            img_metas=img_metas,
            show=show)
    elif isinstance(gt_bboxes, LiDARInstance3DBoxes):
        show_multi_modality_result(
            img,
            gt_bboxes,
            None,
            img_metas['lidar2img'],
            out_dir,
            filename,
            box_mode='lidar',
            img_metas=img_metas,
            show=show)
    elif isinstance(gt_bboxes, CameraInstance3DBoxes):
        show_multi_modality_result(
            img,
            gt_bboxes,
            None,
            img_metas['cam2img'],
            out_dir,
            filename,
            box_mode='camera',
            img_metas=img_metas,
            show=show)
    else:
        # can't project, just show img
        warnings.warn(
            f'unrecognized gt box type {type(gt_bboxes)}, only show image')
        show_multi_modality_result(
            img, None, None, None, out_dir, filename, show=show)
def main():
    """Iterate a dataset and dump/show per-sample visualizations."""
    args = parse_args()

    if args.output_dir is not None:
        mkdir_or_exist(args.output_dir)

    cfg = build_data_cfg(args.config, args.skip_type, args.aug,
                         args.cfg_options)
    try:
        dataset = build_dataset(
            cfg.data.train, default_args=dict(filter_empty_gt=False))
    except TypeError:
        # seg dataset doesn't have `filter_empty_gt` key
        dataset = build_dataset(cfg.data.train)

    dataset_type = cfg.dataset_type
    # configure visualization mode
    vis_task = args.task  # 'det', 'seg', 'multi_modality-det', 'mono-det'

    progress_bar = mmcv.ProgressBar(len(dataset))

    for input in dataset:
        # NOTE: 'multi_modality-det' triggers BOTH branches below
        # (point-cloud view and projected-image view) by design.
        if vis_task in ['det', 'multi_modality-det']:
            # show 3D bboxes on 3D point clouds
            show_det_data(input, args.output_dir, show=args.online)
        if vis_task in ['multi_modality-det', 'mono-det']:
            # project 3D bboxes to 2D image
            show_proj_bbox_img(
                input,
                args.output_dir,
                show=args.online,
                is_nus_mono=(dataset_type == 'NuScenesMonoDataset'))
        elif vis_task in ['seg']:
            # show 3D segmentation mask on 3D point clouds
            show_seg_data(input, args.output_dir, show=args.online)
        progress_bar.update()


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/misc/fuse_conv_bn.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
import
torch
from
mmcv.runner
import
save_checkpoint
from
torch
import
nn
as
nn
from
mmdet3d.apis
import
init_model
def fuse_conv_bn(conv, bn):
    """Fold a batch-norm layer into the preceding conv layer, in place.

    During inference, the functionary of batch norm layers is turned off
    but only the mean and var alone channels are used, which exposes the
    chance to fuse it with the preceding conv layers to save computations
    and simplify network structures.

    Args:
        conv (nn.Conv2d): Convolution layer; its weight/bias are rewritten.
        bn (nn.BatchNorm2d): Batch-norm layer whose statistics are folded in.

    Returns:
        nn.Conv2d: The modified conv layer (same object as ``conv``).
    """
    # Per-channel scale BN applies at inference time.
    scale = bn.weight / torch.sqrt(bn.running_var + bn.eps)
    weight = conv.weight
    if conv.bias is None:
        bias = torch.zeros_like(bn.running_mean)
    else:
        bias = conv.bias
    conv.weight = nn.Parameter(
        weight * scale.reshape([conv.out_channels, 1, 1, 1]))
    conv.bias = nn.Parameter((bias - bn.running_mean) * scale + bn.bias)
    return conv
def fuse_module(m):
    """Recursively fuse every Conv2d+BN pair inside ``m``, in place.

    A BatchNorm2d/SyncBatchNorm child that directly follows a Conv2d
    sibling is folded into that conv via ``fuse_conv_bn`` and replaced
    with ``nn.Identity`` so the module structure stays unchanged.

    Args:
        m (nn.Module): Module tree to rewrite.

    Returns:
        nn.Module: The same module, with conv/bn pairs fused.
    """
    pending_conv = None
    pending_name = None

    for child_name, child in m.named_children():
        if isinstance(child, (nn.BatchNorm2d, nn.SyncBatchNorm)):
            # only fuse BN that is after Conv
            if pending_conv is None:
                continue
            m._modules[pending_name] = fuse_conv_bn(pending_conv, child)
            # To reduce changes, set BN as Identity instead of deleting it.
            m._modules[child_name] = nn.Identity()
            pending_conv = None
        elif isinstance(child, nn.Conv2d):
            pending_conv = child
            pending_name = child_name
        else:
            fuse_module(child)
    return m
def parse_args():
    """Parse command-line options for the conv/bn fusion tool."""
    parser = argparse.ArgumentParser(
        description='fuse Conv and BN layers in a model')
    parser.add_argument('config', help='config file path')
    parser.add_argument('checkpoint', help='checkpoint file path')
    parser.add_argument('out', help='output path of the converted model')
    return parser.parse_args()
def main():
    """Load a model, fuse its conv+bn layers, and save the checkpoint."""
    args = parse_args()
    # build the model from a config file and a checkpoint file
    model = init_model(args.config, args.checkpoint)
    # fuse conv and bn layers of the model, then write the result out
    save_checkpoint(fuse_module(model), args.out)


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/misc/print_config.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
from
mmcv
import
Config
,
DictAction
def parse_args():
    """Parse command-line options for printing a config."""
    parser = argparse.ArgumentParser(description='Print the whole config')
    parser.add_argument('config', help='config file path')
    # Key=value overrides merged into the loaded config.
    parser.add_argument(
        '--options', nargs='+', action=DictAction, help='arguments in dict')
    return parser.parse_args()
def main():
    """Load a config, apply CLI overrides, and print its pretty text."""
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # Merge any --options overrides before printing.
    if args.options is not None:
        cfg.merge_from_dict(args.options)
    print(f'Config:\n{cfg.pretty_text}')


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/misc/visualize_results.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
import
mmcv
from
mmcv
import
Config
from
mmdet3d.datasets
import
build_dataset
def parse_args():
    """Parse command-line options for result visualization."""
    parser = argparse.ArgumentParser(
        description='MMDet3D visualize the results')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('--result', help='results file in pickle format')
    parser.add_argument(
        '--show-dir', help='directory where visualize results will be saved')
    return parser.parse_args()
def main():
    """Load pickled results and render them via the dataset's `show`."""
    args = parse_args()

    # Reject anything that is not a pickle before touching the config.
    if args.result is not None and \
            not args.result.endswith(('.pkl', '.pickle')):
        raise ValueError('The results file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    cfg.data.test.test_mode = True

    # build the dataset
    dataset = build_dataset(cfg.data.test)
    results = mmcv.load(args.result)

    if getattr(dataset, 'show', None) is not None:
        # data loading pipeline for showing
        eval_pipeline = cfg.get('eval_pipeline', {})
        if eval_pipeline:
            dataset.show(results, args.show_dir, pipeline=eval_pipeline)
        else:
            dataset.show(results, args.show_dir)  # use default pipeline
    else:
        raise NotImplementedError(
            'Show is not implemented for dataset {}!'.format(
                type(dataset).__name__))


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/model_converters/convert_h3dnet_checkpoints.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
import
tempfile
import
torch
from
mmcv
import
Config
from
mmcv.runner
import
load_state_dict
from
mmdet3d.models
import
build_detector
def parse_args():
    """Parse command-line options for the H3DNet checkpoint upgrade."""
    parser = argparse.ArgumentParser(
        description='MMDet3D upgrade model version(before v0.6.0) of H3DNet')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='path of the output checkpoint file')
    return parser.parse_args()
def parse_config(config_strings):
    """Parse config from strings.

    The config text (stored inside old checkpoints) is written to a
    temporary .py file, loaded with mmcv, and then patched in place so
    legacy key names match the post-v0.6.0 model definitions.

    Args:
        config_strings (string): strings of model config.

    Returns:
        Config: model config
    """
    temp_file = tempfile.NamedTemporaryFile()
    config_path = f'{temp_file.name}.py'
    with open(config_path, 'w') as f:
        f.write(config_strings)

    config = Config.fromfile(config_path)

    # Update backbone config
    if 'pool_mod' in config.model.backbone.backbones:
        config.model.backbone.backbones.pop('pool_mod')

    if 'sa_cfg' not in config.model.backbone:
        config.model.backbone['sa_cfg'] = dict(
            type='PointSAModule',
            pool_mod='max',
            use_xyz=True,
            normalize_xyz=True)

    if 'type' not in config.model.rpn_head.vote_aggregation_cfg:
        config.model.rpn_head.vote_aggregation_cfg['type'] = 'PointSAModule'

    # Update rpn_head config
    if 'pred_layer_cfg' not in config.model.rpn_head:
        config.model.rpn_head['pred_layer_cfg'] = dict(
            in_channels=128, shared_conv_channels=(128, 128), bias=True)

    if 'feat_channels' in config.model.rpn_head:
        config.model.rpn_head.pop('feat_channels')

    # NOTE: 'vote_moudule_cfg' (sic) is the legacy misspelled key name;
    # do not "fix" the spelling of the lookup key.
    if 'vote_moudule_cfg' in config.model.rpn_head:
        config.model.rpn_head['vote_module_cfg'] = config.model.rpn_head.pop(
            'vote_moudule_cfg')

    # When use_xyz is set, the 3 coordinate channels are appended inside
    # the SA module, so the configured input width shrinks by 3.
    if config.model.rpn_head.vote_aggregation_cfg.use_xyz:
        config.model.rpn_head.vote_aggregation_cfg.mlp_channels[0] -= 3

    for cfg in config.model.roi_head.primitive_list:
        cfg['vote_module_cfg'] = cfg.pop('vote_moudule_cfg')
        cfg.vote_aggregation_cfg.mlp_channels[0] -= 3
        if 'type' not in cfg.vote_aggregation_cfg:
            cfg.vote_aggregation_cfg['type'] = 'PointSAModule'

    # NOTE: 'suface_matching_cfg' (sic) is likewise the legacy key name.
    if 'type' not in config.model.roi_head.bbox_head.suface_matching_cfg:
        config.model.roi_head.bbox_head.suface_matching_cfg[
            'type'] = 'PointSAModule'

    if config.model.roi_head.bbox_head.suface_matching_cfg.use_xyz:
        config.model.roi_head.bbox_head.suface_matching_cfg.mlp_channels[
            0] -= 3

    if 'type' not in config.model.roi_head.bbox_head.line_matching_cfg:
        config.model.roi_head.bbox_head.line_matching_cfg[
            'type'] = 'PointSAModule'

    if config.model.roi_head.bbox_head.line_matching_cfg.use_xyz:
        config.model.roi_head.bbox_head.line_matching_cfg.mlp_channels[0] -= 3

    if 'proposal_module_cfg' in config.model.roi_head.bbox_head:
        config.model.roi_head.bbox_head.pop('proposal_module_cfg')

    temp_file.close()
    return config
def main():
    """Convert keys in checkpoints for H3DNet.

    There can be some breaking changes during the development of mmdetection3d,
    and this tool is used for upgrading checkpoints trained with old versions
    (before v0.6.0) to the latest one.
    """
    args = parse_args()
    checkpoint = torch.load(args.checkpoint)
    cfg = parse_config(checkpoint['meta']['config'])
    # Build the model and load checkpoint
    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    orig_ckpt = checkpoint['state_dict']
    converted_ckpt = orig_ckpt.copy()

    # Number of classes decides where the class/regression channels are
    # split inside the old fused conv_out tensors.
    if cfg['dataset_type'] == 'ScanNetDataset':
        NUM_CLASSES = 18
    elif cfg['dataset_type'] == 'SUNRGBDDataset':
        NUM_CLASSES = 10
    else:
        raise NotImplementedError

    RENAME_PREFIX = {
        'rpn_head.conv_pred.0': 'rpn_head.conv_pred.shared_convs.layer0',
        'rpn_head.conv_pred.1': 'rpn_head.conv_pred.shared_convs.layer1'
    }

    DEL_KEYS = [
        'rpn_head.conv_pred.0.bn.num_batches_tracked',
        'rpn_head.conv_pred.1.bn.num_batches_tracked'
    ]

    # new_key -> (old fused key, list of (start, end) channel slices to
    # extract and concatenate; end == -1 means "to the end").
    EXTRACT_KEYS = {
        'rpn_head.conv_pred.conv_cls.weight':
        ('rpn_head.conv_pred.conv_out.weight', [(0, 2), (-NUM_CLASSES, -1)]),
        'rpn_head.conv_pred.conv_cls.bias':
        ('rpn_head.conv_pred.conv_out.bias', [(0, 2), (-NUM_CLASSES, -1)]),
        'rpn_head.conv_pred.conv_reg.weight':
        ('rpn_head.conv_pred.conv_out.weight', [(2, -NUM_CLASSES)]),
        'rpn_head.conv_pred.conv_reg.bias':
        ('rpn_head.conv_pred.conv_out.bias', [(2, -NUM_CLASSES)])
    }

    # Delete some useless keys
    for key in DEL_KEYS:
        converted_ckpt.pop(key)

    # Rename keys with specific prefix
    RENAME_KEYS = dict()
    for old_key in converted_ckpt.keys():
        for rename_prefix in RENAME_PREFIX.keys():
            if rename_prefix in old_key:
                new_key = old_key.replace(rename_prefix,
                                          RENAME_PREFIX[rename_prefix])
                RENAME_KEYS[new_key] = old_key
    for new_key, old_key in RENAME_KEYS.items():
        converted_ckpt[new_key] = converted_ckpt.pop(old_key)

    # Extract weights and rename the keys
    for new_key, (old_key, indices) in EXTRACT_KEYS.items():
        cur_layers = orig_ckpt[old_key]
        converted_layers = []
        for (start, end) in indices:
            if end != -1:
                converted_layers.append(cur_layers[start:end])
            else:
                converted_layers.append(cur_layers[start:])
        converted_layers = torch.cat(converted_layers, 0)
        converted_ckpt[new_key] = converted_layers
        if old_key in converted_ckpt.keys():
            converted_ckpt.pop(old_key)

    # Check the converted checkpoint by loading to the model
    load_state_dict(model, converted_ckpt, strict=True)
    checkpoint['state_dict'] = converted_ckpt
    torch.save(checkpoint, args.out)


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/model_converters/convert_votenet_checkpoints.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
import
tempfile
import
torch
from
mmcv
import
Config
from
mmcv.runner
import
load_state_dict
from
mmdet3d.models
import
build_detector
def parse_args():
    """Parse command-line options for the VoteNet checkpoint upgrade."""
    parser = argparse.ArgumentParser(
        description='MMDet3D upgrade model version(before v0.6.0) of VoteNet')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='path of the output checkpoint file')
    return parser.parse_args()
def parse_config(config_strings):
    """Parse config from strings.

    The config text (stored inside old checkpoints) is written to a
    temporary .py file, loaded with mmcv, and then patched in place so
    legacy key names match the post-v0.6.0 model definitions.

    Args:
        config_strings (string): strings of model config.

    Returns:
        Config: model config
    """
    temp_file = tempfile.NamedTemporaryFile()
    config_path = f'{temp_file.name}.py'
    with open(config_path, 'w') as f:
        f.write(config_strings)

    config = Config.fromfile(config_path)

    # Update backbone config
    if 'pool_mod' in config.model.backbone:
        config.model.backbone.pop('pool_mod')

    if 'sa_cfg' not in config.model.backbone:
        config.model.backbone['sa_cfg'] = dict(
            type='PointSAModule',
            pool_mod='max',
            use_xyz=True,
            normalize_xyz=True)

    if 'type' not in config.model.bbox_head.vote_aggregation_cfg:
        config.model.bbox_head.vote_aggregation_cfg['type'] = 'PointSAModule'

    # Update bbox_head config
    if 'pred_layer_cfg' not in config.model.bbox_head:
        config.model.bbox_head['pred_layer_cfg'] = dict(
            in_channels=128, shared_conv_channels=(128, 128), bias=True)

    if 'feat_channels' in config.model.bbox_head:
        config.model.bbox_head.pop('feat_channels')

    # NOTE: 'vote_moudule_cfg' (sic) is the legacy misspelled key name;
    # do not "fix" the spelling of the lookup key.
    if 'vote_moudule_cfg' in config.model.bbox_head:
        config.model.bbox_head['vote_module_cfg'] = config.model.bbox_head.pop(
            'vote_moudule_cfg')

    # When use_xyz is set, the 3 coordinate channels are appended inside
    # the SA module, so the configured input width shrinks by 3.
    if config.model.bbox_head.vote_aggregation_cfg.use_xyz:
        config.model.bbox_head.vote_aggregation_cfg.mlp_channels[0] -= 3

    temp_file.close()
    return config
def main():
    """Convert keys in checkpoints for VoteNet.

    There can be some breaking changes during the development of mmdetection3d,
    and this tool is used for upgrading checkpoints trained with old versions
    (before v0.6.0) to the latest one.
    """
    args = parse_args()
    checkpoint = torch.load(args.checkpoint)
    cfg = parse_config(checkpoint['meta']['config'])
    # Build the model and load checkpoint
    model = build_detector(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    orig_ckpt = checkpoint['state_dict']
    converted_ckpt = orig_ckpt.copy()

    # Number of classes decides where the class/regression channels are
    # split inside the old fused conv_out tensors.
    if cfg['dataset_type'] == 'ScanNetDataset':
        NUM_CLASSES = 18
    elif cfg['dataset_type'] == 'SUNRGBDDataset':
        NUM_CLASSES = 10
    else:
        raise NotImplementedError

    RENAME_PREFIX = {
        'bbox_head.conv_pred.0': 'bbox_head.conv_pred.shared_convs.layer0',
        'bbox_head.conv_pred.1': 'bbox_head.conv_pred.shared_convs.layer1'
    }

    DEL_KEYS = [
        'bbox_head.conv_pred.0.bn.num_batches_tracked',
        'bbox_head.conv_pred.1.bn.num_batches_tracked'
    ]

    # new_key -> (old fused key, list of (start, end) channel slices to
    # extract and concatenate; end == -1 means "to the end").
    EXTRACT_KEYS = {
        'bbox_head.conv_pred.conv_cls.weight':
        ('bbox_head.conv_pred.conv_out.weight', [(0, 2), (-NUM_CLASSES, -1)]),
        'bbox_head.conv_pred.conv_cls.bias':
        ('bbox_head.conv_pred.conv_out.bias', [(0, 2), (-NUM_CLASSES, -1)]),
        'bbox_head.conv_pred.conv_reg.weight':
        ('bbox_head.conv_pred.conv_out.weight', [(2, -NUM_CLASSES)]),
        'bbox_head.conv_pred.conv_reg.bias':
        ('bbox_head.conv_pred.conv_out.bias', [(2, -NUM_CLASSES)])
    }

    # Delete some useless keys
    for key in DEL_KEYS:
        converted_ckpt.pop(key)

    # Rename keys with specific prefix
    RENAME_KEYS = dict()
    for old_key in converted_ckpt.keys():
        for rename_prefix in RENAME_PREFIX.keys():
            if rename_prefix in old_key:
                new_key = old_key.replace(rename_prefix,
                                          RENAME_PREFIX[rename_prefix])
                RENAME_KEYS[new_key] = old_key
    for new_key, old_key in RENAME_KEYS.items():
        converted_ckpt[new_key] = converted_ckpt.pop(old_key)

    # Extract weights and rename the keys
    for new_key, (old_key, indices) in EXTRACT_KEYS.items():
        cur_layers = orig_ckpt[old_key]
        converted_layers = []
        for (start, end) in indices:
            if end != -1:
                converted_layers.append(cur_layers[start:end])
            else:
                converted_layers.append(cur_layers[start:])
        converted_layers = torch.cat(converted_layers, 0)
        converted_ckpt[new_key] = converted_layers
        if old_key in converted_ckpt.keys():
            converted_ckpt.pop(old_key)

    # Check the converted checkpoint by loading to the model
    load_state_dict(model, converted_ckpt, strict=True)
    checkpoint['state_dict'] = converted_ckpt
    torch.save(checkpoint, args.out)


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/model_converters/publish_model.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
import
subprocess
import
torch
def parse_args():
    """Parse command-line options for publishing a checkpoint."""
    parser = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    parser.add_argument('in_file', help='input checkpoint filename')
    parser.add_argument('out_file', help='output checkpoint filename')
    return parser.parse_args()
def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it.

    Loads ``in_file``, drops the optimizer state to shrink the file,
    saves it to ``out_file``, then renames the result to
    ``<out_file without .pth>-<first 8 sha256 hex chars>.pth``.

    Args:
        in_file (str): Path of the checkpoint to publish.
        out_file (str): Path used for the intermediate save; the final
            file gets the sha256 prefix appended before '.pth'.
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    torch.save(checkpoint, out_file)
    sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # BUGFIX: `out_file.rstrip('.pth')` strips any trailing '.', 'p', 't',
    # 'h' characters (e.g. 'depth.pth' -> 'de'), not the suffix; remove
    # the extension explicitly instead.
    stem = out_file[:-len('.pth')] if out_file.endswith('.pth') else out_file
    final_file = stem + '-{}.pth'.format(sha[:8])
    subprocess.Popen(['mv', out_file, final_file])
def main():
    """CLI entry point: publish the checkpoint named on the command line."""
    args = parse_args()
    process_checkpoint(args.in_file, args.out_file)


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/model_converters/regnet2mmdet.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
from
collections
import
OrderedDict
import
torch
def convert_stem(model_key, model_weight, state_dict, converted_names):
    """Map a pycls stem parameter onto the mmdet ResNet naming scheme.

    Mutates ``state_dict`` (new name -> weight) and ``converted_names``
    (records the original key as handled).
    """
    new_key = model_key.replace('stem.conv', 'conv1').replace('stem.bn', 'bn1')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
    print(f'Convert {model_key} to {new_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
    """Map a pycls classification-head parameter onto the mmdet name.

    Mutates ``state_dict`` (new name -> weight) and ``converted_names``
    (records the original key as handled).
    """
    target_key = model_key.replace('head.fc', 'fc')
    converted_names.add(model_key)
    state_dict[target_key] = model_weight
    print(f'Convert {model_key} to {target_key}')
def convert_reslayer(model_key, model_weight, state_dict, converted_names):
    """Map a pycls residual-stage parameter onto the mmdet ResNet name.

    pycls keys look like 's<stage>.b<block>.<module>...'; stages map to
    'layerN', blocks are re-indexed from 0, and the 'f' (bottleneck)
    sub-modules map to conv1..3/bn1..3.

    Args:
        model_key (str): Original pycls parameter name.
        model_weight: Parameter tensor to store under the new name.
        state_dict (dict): Destination mapping, mutated in place.
        converted_names (set): Records handled original keys, mutated.

    Raises:
        ValueError: If the key does not match any known naming pattern.
    """
    split_keys = model_key.split('.')
    layer, block, module = split_keys[:3]
    block_id = int(block[1:])
    layer_name = f'layer{int(layer[1:])}'
    # pycls block indices start at 1; mmdet children start at 0.
    block_name = f'{block_id - 1}'

    if block_id == 1 and module == 'bn':
        new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}'
    elif block_id == 1 and module == 'proj':
        new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}'
    elif module == 'f':
        # Map pycls bottleneck sub-module names to torchvision-style names.
        sub_module_map = {
            'a_bn': 'bn1',
            'b_bn': 'bn2',
            'c_bn': 'bn3',
            'a': 'conv1',
            'b': 'conv2',
            'c': 'conv3',
        }
        sub_module = split_keys[3]
        if sub_module not in sub_module_map:
            # BUGFIX: an unknown sub-module previously fell through with
            # `module_name` unbound and crashed with an opaque NameError.
            raise ValueError(f'Unsupported conversion of key {model_key}')
        module_name = sub_module_map[sub_module]
        new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}'
    else:
        raise ValueError(f'Unsupported conversion of key {model_key}')
    print(f'Convert {model_key} to {new_key}')
    state_dict[new_key] = model_weight
    converted_names.add(model_key)
def convert(src, dst):
    """Convert keys in pycls pretrained RegNet models to mmdet style.

    Args:
        src (str): Path of the pycls checkpoint (holds 'model_state').
        dst (str): Path to write the converted mmdet-style checkpoint.
    """
    # load caffe model
    src_blobs = torch.load(src)['model_state']
    # convert to pytorch style
    converted_state = OrderedDict()
    handled_keys = set()
    for param_key, param_value in src_blobs.items():
        if 'stem' in param_key:
            convert_stem(param_key, param_value, converted_state, handled_keys)
        elif 'head' in param_key:
            convert_head(param_key, param_value, converted_state, handled_keys)
        elif param_key.startswith('s'):
            convert_reslayer(param_key, param_value, converted_state,
                             handled_keys)

    # check if all layers are converted
    for param_key in src_blobs:
        if param_key not in handled_keys:
            print(f'not converted: {param_key}')
    # save checkpoint
    checkpoint = dict()
    checkpoint['state_dict'] = converted_state
    torch.save(checkpoint, dst)
def main():
    """CLI entry point for the RegNet checkpoint conversion."""
    parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src detectron model path')
    parser.add_argument('dst', help='save path')
    args = parser.parse_args()
    convert(args.src, args.dst)


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/slurm_test.sh
0 → 100644
View file @
305e110f
#!/usr/bin/env bash
# Slurm testing launcher.
# Usage: ./slurm_test.sh PARTITION JOB_NAME CONFIG CHECKPOINT [test.py args...]
# Environment overrides: GPUS, GPUS_PER_NODE, CPUS_PER_TASK, SRUN_ARGS.

set -x

PARTITION=$1
JOB_NAME=$2
CONFIG=$3
CHECKPOINT=$4
GPUS=${GPUS:-8}
GPUS_PER_NODE=${GPUS_PER_NODE:-8}
CPUS_PER_TASK=${CPUS_PER_TASK:-5}
# Everything after the 4th positional arg is forwarded to test.py.
PY_ARGS=${@:5}
SRUN_ARGS=${SRUN_ARGS:-""}

# Put the repo root on PYTHONPATH, then run one srun task per GPU.
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
srun -p ${PARTITION} \
    --job-name=${JOB_NAME} \
    --gres=gpu:${GPUS_PER_NODE} \
    --ntasks=${GPUS} \
    --ntasks-per-node=${GPUS_PER_NODE} \
    --cpus-per-task=${CPUS_PER_TASK} \
    --kill-on-bad-exit=1 \
    ${SRUN_ARGS} \
    python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS}
autonomous_driving/openlane-v2/tools/slurm_train.sh
0 → 100644
View file @
305e110f
#!/usr/bin/env bash
# Launch distributed training on a Slurm cluster.
# Usage: [GPUS=8] [GPUS_PER_NODE=8] [CPUS_PER_TASK=5] [SRUN_ARGS=...] \
#        ./tools/slurm_train.sh PARTITION JOB_NAME CONFIG WORK_DIR [train.py args...]
set -x

# Positional arguments.
PARTITION=$1
JOB_NAME=$2
CONFIG=$3
WORK_DIR=$4
# Cluster sizing, overridable from the environment.
GPUS=${GPUS:-8}
GPUS_PER_NODE=${GPUS_PER_NODE:-8}
CPUS_PER_TASK=${CPUS_PER_TASK:-5}
SRUN_ARGS=${SRUN_ARGS:-""}
# Everything after the 4th positional argument is forwarded to tools/train.py.
PY_ARGS=${@:5}

# Put the repository root on PYTHONPATH so tools/train.py can import the package.
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
srun -p ${PARTITION} \
    --job-name=${JOB_NAME} \
    --gres=gpu:${GPUS_PER_NODE} \
    --ntasks=${GPUS} \
    --ntasks-per-node=${GPUS_PER_NODE} \
    --cpus-per-task=${CPUS_PER_TASK} \
    --kill-on-bad-exit=1 \
    ${SRUN_ARGS} \
    python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS}
autonomous_driving/openlane-v2/tools/test.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
import
os
import
warnings
import
mmcv
import
torch
from
mmcv
import
Config
,
DictAction
from
mmcv.cnn
import
fuse_conv_bn
from
mmcv.parallel
import
MMDataParallel
,
MMDistributedDataParallel
from
mmcv.runner
import
(
get_dist_info
,
init_dist
,
load_checkpoint
,
wrap_fp16_model
)
import
mmdet
from
mmdet3d.apis
import
single_gpu_test
from
mmdet3d.datasets
import
build_dataloader
,
build_dataset
from
mmdet3d.models
import
build_model
from
mmdet.apis
import
multi_gpu_test
,
set_random_seed
from
mmdet.datasets
import
replace_ImageToTensor
def _mmdet_version_tuple(version):
    """Parse the leading numeric components of a version string.

    The original check ``mmdet.__version__ > '2.23.0'`` compared version
    strings lexicographically, which is wrong: '2.9.0' > '2.23.0' is True as
    strings even though 2.9.0 < 2.23.0 as versions. Comparing tuples of
    integers gives the correct ordering.
    """
    parts = []
    for chunk in version.split('.'):
        digits = ''
        for char in chunk:
            if not char.isdigit():
                break
            digits += char
        if not digits:
            # Stop at the first non-numeric component (e.g. 'rc1').
            break
        parts.append(int(digits))
    return tuple(parts)


if _mmdet_version_tuple(mmdet.__version__) > (2, 23, 0):
    # If mmdet version > 2.23.0, setup_multi_processes would be imported and
    # used from mmdet instead of mmdet3d.
    from mmdet.utils import setup_multi_processes
else:
    from mmdet3d.utils import setup_multi_processes

try:
    # If mmdet version > 2.23.0, compat_cfg would be imported and
    # used from mmdet instead of mmdet3d.
    from mmdet.utils import compat_cfg
except ImportError:
    from mmdet3d.utils import compat_cfg
def parse_args():
    """Parse the command-line arguments for testing/evaluating a model.

    Returns:
        argparse.Namespace: Parsed arguments. ``eval_options`` absorbs the
        deprecated ``--options`` flag, and ``LOCAL_RANK`` is exported to the
        environment for torch.distributed launchers.
    """
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument('--out', help='output result file in pickle format')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    parser.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='id of gpu to use '
        '(only applicable to non-distributed testing)')
    parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    parser.add_argument(
        '--eval',
        type=str,
        nargs='+',
        help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
        ' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--show-dir', help='directory where results will be saved')
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function (deprecate), '
        'change to --eval-options instead.')
    parser.add_argument(
        '--eval-options',
        nargs='+',
        action=DictAction,
        help='custom options for evaluation, the key-value pair in xxx=yyy '
        'format will be kwargs for dataset.evaluate() function')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    args = parser.parse_args()
    # torch.distributed launchers read LOCAL_RANK from the environment;
    # fall back to the CLI flag when it is not already set.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    # `--options` is the deprecated alias of `--eval-options`; accept exactly
    # one of the two and fold the old flag into the new one.
    if args.options and args.eval_options:
        raise ValueError(
            '--options and --eval-options cannot be both specified, '
            '--options is deprecated in favor of --eval-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --eval-options')
        args.eval_options = args.options
    return args
def main():
    """Entry point for testing/evaluating a trained model.

    Parses CLI args and the mmcv config, builds the test dataloader and the
    model, loads the checkpoint, runs single- or multi-GPU inference, then on
    rank 0 dumps, formats and/or evaluates the collected results.
    """
    args = parse_args()

    # At least one output action must be requested, otherwise inference would
    # run and throw all results away.
    assert args.out or args.eval or args.format_only or args.show \
        or args.show_dir, \
        ('Please specify at least one operation (save/eval/format/show the '
         'results / save the results) with the argument "--out", "--eval"'
         ', "--format-only", "--show" or "--show-dir"')

    if args.eval and args.format_only:
        raise ValueError('--eval and --format_only cannot be both specified')

    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
        raise ValueError('The output file must be a pkl file.')

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    cfg = compat_cfg(cfg)

    # set multi-process settings
    setup_multi_processes(cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # Never load pretrained backbone weights at test time; the checkpoint
    # provides all parameters.
    cfg.model.pretrained = None

    if args.gpu_ids is not None:
        # Only single-GPU non-distributed testing is supported: keep just the
        # first requested GPU.
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
                      'Because we only support single GPU mode in '
                      'non-distributed testing. Use the first GPU '
                      'in `gpu_ids` now.')
    else:
        cfg.gpu_ids = [args.gpu_id]

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    test_dataloader_default_args = dict(
        samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False)

    # in case the test dataset is concatenated
    if isinstance(cfg.data.test, dict):
        cfg.data.test.test_mode = True
        if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.test.pipeline = replace_ImageToTensor(
                cfg.data.test.pipeline)
    elif isinstance(cfg.data.test, list):
        for ds_cfg in cfg.data.test:
            ds_cfg.test_mode = True
        if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1:
            for ds_cfg in cfg.data.test:
                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)

    # Explicit CLI/config dataloader settings override the defaults above.
    test_loader_cfg = {
        **test_dataloader_default_args,
        **cfg.data.get('test_dataloader', {})
    }

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed, deterministic=args.deterministic)

    # build the dataloader
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(dataset, **test_loader_cfg)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_model(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        model.CLASSES = dataset.CLASSES
    # palette for visualization in segmentation tasks
    if 'PALETTE' in checkpoint.get('meta', {}):
        model.PALETTE = checkpoint['meta']['PALETTE']
    elif hasattr(dataset, 'PALETTE'):
        # segmentation dataset has `PALETTE` attribute
        model.PALETTE = dataset.PALETTE

    if not distributed:
        model = MMDataParallel(model, device_ids=cfg.gpu_ids)
        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    # Only rank 0 holds the gathered results and performs output actions.
    rank, _ = get_dist_info()
    if rank == 0:
        if args.out:
            print(f'\nwriting results to {args.out}')
            mmcv.dump(outputs, args.out)
        kwargs = {} if args.eval_options is None else args.eval_options
        if args.format_only:
            dataset.format_results(outputs, **kwargs)
        if args.eval:
            eval_kwargs = cfg.get('evaluation', {}).copy()
            # hard-code way to remove EvalHook args
            for key in [
                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
                    'rule'
            ]:
                eval_kwargs.pop(key, None)
            eval_kwargs.update(dict(metric=args.eval, **kwargs))
            print(dataset.evaluate(outputs, **eval_kwargs))


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/train.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
from
__future__
import
division
import
argparse
import
copy
import
os
import
time
import
warnings
from
os
import
path
as
osp
import
mmcv
import
torch
import
torch.distributed
as
dist
from
mmcv
import
Config
,
DictAction
from
mmcv.runner
import
get_dist_info
,
init_dist
from
mmdet
import
__version__
as
mmdet_version
from
mmdet3d
import
__version__
as
mmdet3d_version
from
mmdet3d.apis
import
init_random_seed
,
train_model
from
mmdet3d.datasets
import
build_dataset
from
mmdet3d.models
import
build_model
from
mmdet3d.utils
import
collect_env
,
get_root_logger
from
mmdet.apis
import
set_random_seed
from
mmseg
import
__version__
as
mmseg_version
try
:
# If mmdet version > 2.20.0, setup_multi_processes would be imported and
# used from mmdet instead of mmdet3d.
from
mmdet.utils
import
setup_multi_processes
except
ImportError
:
from
mmdet3d.utils
import
setup_multi_processes
def parse_args():
    """Parse the command-line arguments for training a detector.

    Returns:
        argparse.Namespace: Parsed arguments. ``cfg_options`` absorbs the
        deprecated ``--options`` flag, and ``LOCAL_RANK`` is exported to the
        environment for torch.distributed launchers.
    """
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--auto-resume',
        action='store_true',
        help='resume from the latest checkpoint automatically')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')
    # The three GPU-selection flags are mutually exclusive.
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='(Deprecated, please use --gpu-id) number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='(Deprecated, please use --gpu-id) ids of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-id',
        type=int,
        default=0,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument(
        '--diff-seed',
        action='store_true',
        help='Whether or not set different seeds for different ranks')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--autoscale-lr',
        action='store_true',
        help='automatically scale lr with the number of gpus')
    args = parser.parse_args()
    # torch.distributed launchers read LOCAL_RANK from the environment;
    # fall back to the CLI flag when it is not already set.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)

    # `--options` is the deprecated alias of `--cfg-options`; accept exactly
    # one of the two and fold the old flag into the new one.
    if args.options and args.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both specified, '
            '--options is deprecated in favor of --cfg-options')
    if args.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        args.cfg_options = args.options
    return args
def main():
    """Entry point for training a detector.

    Parses CLI args and the mmcv config, resolves work dir / resume / GPU
    settings, optionally initializes distributed training, sets up logging
    and random seeds, builds model and datasets, and hands off to
    ``train_model``.
    """
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    # set multi-process settings
    setup_multi_processes(cfg)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])

    if args.resume_from is not None:
        cfg.resume_from = args.resume_from

    if args.auto_resume:
        cfg.auto_resume = args.auto_resume
        warnings.warn('`--auto-resume` is only supported when mmdet'
                      'version >= 2.20.0 for 3D detection model or'
                      'mmsegmentation verision >= 0.21.0 for 3D'
                      'segmentation model')

    # GPU selection: all three deprecated/new flags collapse to a single GPU
    # in non-distributed mode.
    if args.gpus is not None:
        cfg.gpu_ids = range(1)
        warnings.warn('`--gpus` is deprecated because we only support '
                      'single GPU mode in non-distributed training. '
                      'Use `gpus=1` now.')
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids[0:1]
        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
                      'Because we only support single GPU mode in '
                      'non-distributed training. Use the first GPU '
                      'in `gpu_ids` now.')
    if args.gpus is None and args.gpu_ids is None:
        cfg.gpu_ids = [args.gpu_id]

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    # specify logger name, if we still use 'mmdet', the output info will be
    # filtered and won't be saved in the log_file
    # TODO: ugly workaround to judge whether we are training det or seg model
    if cfg.model.type in ['EncoderDecoder3D']:
        logger_name = 'mmseg'
    else:
        logger_name = 'mmdet'
    logger = get_root_logger(
        log_file=log_file, log_level=cfg.log_level, name=logger_name)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds; with --diff-seed each rank gets its own seed.
    seed = init_random_seed(args.seed)
    seed = seed + dist.get_rank() if args.diff_seed else seed
    logger.info(f'Set random seed to {seed}, '
                f'deterministic: {args.deterministic}')
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    meta['seed'] = seed
    meta['exp_name'] = osp.basename(args.config)

    model = build_model(
        cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
    model.init_weights()
    logger.info(f'Model:\n{model}')

    datasets = [build_dataset(cfg.data.train)]
    # A two-stage workflow additionally runs a validation pass each epoch.
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        # in case we use a dataset wrapper
        if 'dataset' in cfg.data.train:
            val_dataset.pipeline = cfg.data.train.dataset.pipeline
        else:
            val_dataset.pipeline = cfg.data.train.pipeline
        # set test_mode=False here in deep copied config
        # which do not affect AP/AR calculation later
        # refer to https://mmdetection3d.readthedocs.io/en/latest/tutorials/customize_runtime.html#customize-workflow # noqa
        val_dataset.test_mode = False
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=mmdet_version,
            mmseg_version=mmseg_version,
            mmdet3d_version=mmdet3d_version,
            config=cfg.pretty_text,
            CLASSES=datasets[0].CLASSES,
            PALETTE=datasets[0].PALETTE  # for segmentors
            if hasattr(datasets[0], 'PALETTE') else None)
    # add an attribute for visualization convenience
    model.CLASSES = datasets[0].CLASSES
    train_model(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/update_data_coords.py
0 → 100644
View file @
305e110f
import
argparse
import
time
from
os
import
path
as
osp
import
mmcv
import
numpy
as
np
from
mmdet3d.core.bbox
import
limit_period
def update_sunrgbd_infos(root_dir, out_dir, pkl_files):
    """Negate annotation yaw in SUNRGBD info pkls for the Depth-coord refactor.

    Args:
        root_dir (str): Directory containing the input pkl files.
        out_dir (str): Directory the updated pkl files are written to.
        pkl_files (list[str]): Names of the info pkl files to update.
    """
    print(f'{pkl_files} will be modified because '
          f'of the refactor of the Depth coordinate system.')
    if root_dir == out_dir:
        # Give the user a short window to abort before files are overwritten.
        print(f'Warning, you are overwriting '
              f'the original data under {root_dir}.')
        time.sleep(3)
    for pkl_file in pkl_files:
        src_path = osp.join(root_dir, pkl_file)
        print(f'Reading from input file: {src_path}.')
        infos = mmcv.load(src_path)
        print('Start updating:')
        for info in mmcv.track_iter_progress(infos):
            annos = info['annos']
            if 'rotation_y' in annos:
                # Flip the sign of the heading angle in both the scalar field
                # and the last column of the box array.
                annos['rotation_y'] = -annos['rotation_y']
                annos['gt_boxes_upright_depth'][:, -1:] = \
                    -annos['gt_boxes_upright_depth'][:, -1:]
        dst_path = osp.join(out_dir, pkl_file)
        print(f'Writing to output file: {dst_path}.')
        mmcv.dump(infos, dst_path, 'pkl')
def update_outdoor_dbinfos(root_dir, out_dir, pkl_files):
    """Update per-class GT-database boxes in dbinfo pkls for the LIDAR-coord
    refactor: swap the length/width fields and remap the yaw angle, in place.

    Args:
        root_dir: Directory containing the input pkl files.
        out_dir: Directory the updated pkl files are written to.
        pkl_files: Names of the dbinfo pkl files to update.
    """
    print(f'{pkl_files} will be modified because '
          f'of the refactor of the LIDAR coordinate system.')
    if root_dir == out_dir:
        # Give the user a short window to abort before files are overwritten.
        print(f'Warning, you are overwriting '
              f'the original data under {root_dir}.')
        time.sleep(3)
    for pkl_file in pkl_files:
        in_path = osp.join(root_dir, pkl_file)
        print(f'Reading from input file: {in_path}.')
        a = mmcv.load(in_path)
        print('Start updating:')
        # dbinfos map class name -> list of sample dicts; each sample carries
        # a 'box3d_lidar' vector (indices 3, 4, 6 are used below — presumably
        # dx, dy, yaw; confirm against the dataset converter).
        for k in a.keys():
            print(f'Updating samples of class {k}:')
            for item in mmcv.track_iter_progress(a[k]):
                # Copy first so the swap reads pre-mutation values.
                boxes = item['box3d_lidar'].copy()
                # swap l, w (or dx, dy)
                item['box3d_lidar'][3] = boxes[4]
                item['box3d_lidar'][4] = boxes[3]
                # change yaw
                item['box3d_lidar'][6] = -boxes[6] - np.pi / 2
                item['box3d_lidar'][6] = limit_period(
                    item['box3d_lidar'][6], period=np.pi * 2)
        out_path = osp.join(out_dir, pkl_file)
        print(f'Writing to output file: {out_path}.')
        mmcv.dump(a, out_path, 'pkl')
def update_nuscenes_or_lyft_infos(root_dir, out_dir, pkl_files):
    """Update nuScenes/Lyft info pkls for the LIDAR-coord refactor: swap the
    length/width columns of 'gt_boxes' and remap the yaw column, in place.

    Args:
        root_dir: Directory containing the input pkl files.
        out_dir: Directory the updated pkl files are written to.
        pkl_files: Names of the info pkl files to update.
    """
    print(f'{pkl_files} will be modified because '
          f'of the refactor of the LIDAR coordinate system.')
    if root_dir == out_dir:
        # Give the user a short window to abort before files are overwritten.
        print(f'Warning, you are overwriting '
              f'the original data under {root_dir}.')
        time.sleep(3)
    for pkl_file in pkl_files:
        in_path = osp.join(root_dir, pkl_file)
        print(f'Reading from input file: {in_path}.')
        a = mmcv.load(in_path)
        print('Start updating:')
        for item in mmcv.track_iter_progress(a['infos']):
            # Copy first so the column swap reads pre-mutation values.
            # Columns 3, 4, 6 are used below — presumably dx, dy, yaw.
            boxes = item['gt_boxes'].copy()
            # swap l, w (or dx, dy)
            item['gt_boxes'][:, 3] = boxes[:, 4]
            item['gt_boxes'][:, 4] = boxes[:, 3]
            # change yaw
            item['gt_boxes'][:, 6] = -boxes[:, 6] - np.pi / 2
            item['gt_boxes'][:, 6] = limit_period(
                item['gt_boxes'][:, 6], period=np.pi * 2)
        out_path = osp.join(out_dir, pkl_file)
        print(f'Writing to output file: {out_path}.')
        mmcv.dump(a, out_path, 'pkl')
# CLI definition for the coordinate-system update script.
parser = argparse.ArgumentParser(
    description='Arg parser for data coords '
    'update due to coords sys refactor.')
parser.add_argument('dataset', metavar='kitti', help='name of the dataset')
parser.add_argument(
    '--root-dir',
    type=str,
    default='./data/kitti',
    help='specify the root dir of dataset')
parser.add_argument(
    '--version',
    type=str,
    default='v1.0',
    required=False,
    help='specify the dataset version, no need for kitti')
parser.add_argument(
    '--out-dir',
    type=str,
    default=None,
    required=False,
    help='name of info pkl')

if __name__ == '__main__':
    # Parse only when run as a script. The original parsed unconditionally at
    # import time, which made the module un-importable: argparse calls
    # sys.exit when the required positional argument is missing.
    args = parser.parse_args()
if __name__ == '__main__':
    # Default to updating the pkl files in place when no --out-dir is given.
    if args.out_dir is None:
        args.out_dir = args.root_dir
    if args.dataset == 'kitti':
        # KITTI infos is in CAM coord sys (unchanged)
        # KITTI dbinfos is in LIDAR coord sys (changed)
        # so we only update dbinfos
        pkl_files = ['kitti_dbinfos_train.pkl']
        update_outdoor_dbinfos(
            root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files)
    elif args.dataset == 'nuscenes':
        # nuScenes infos is in LIDAR coord sys (changed)
        # nuScenes dbinfos is in LIDAR coord sys (changed)
        # so we update both infos and dbinfos
        pkl_files = ['nuscenes_infos_val.pkl']
        if args.version != 'v1.0-mini':
            pkl_files.append('nuscenes_infos_train.pkl')
        else:
            pkl_files.append('nuscenes_infos_train_tiny.pkl')
        update_nuscenes_or_lyft_infos(
            root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files)
        if args.version != 'v1.0-mini':
            pkl_files = ['nuscenes_dbinfos_train.pkl']
            update_outdoor_dbinfos(
                root_dir=args.root_dir,
                out_dir=args.out_dir,
                pkl_files=pkl_files)
    elif args.dataset == 'lyft':
        # Lyft infos is in LIDAR coord sys (changed)
        # Lyft has no dbinfos
        # so we update infos
        pkl_files = ['lyft_infos_train.pkl', 'lyft_infos_val.pkl']
        update_nuscenes_or_lyft_infos(
            root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files)
    elif args.dataset == 'waymo':
        # Waymo infos is in CAM coord sys (unchanged)
        # Waymo dbinfos is in LIDAR coord sys (changed)
        # so we only update dbinfos
        pkl_files = ['waymo_dbinfos_train.pkl']
        update_outdoor_dbinfos(
            root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files)
    elif args.dataset == 'scannet':
        # ScanNet infos is in DEPTH coord sys (changed)
        # but bbox is without yaw
        # so ScanNet is unaffected
        pass
    elif args.dataset == 's3dis':
        # Segmentation datasets are not affected
        pass
    elif args.dataset == 'sunrgbd':
        # SUNRGBD infos is in DEPTH coord sys (changed)
        # and bbox is with yaw
        # so we update infos
        pkl_files = ['sunrgbd_infos_train.pkl', 'sunrgbd_infos_val.pkl']
        update_sunrgbd_infos(
            root_dir=args.root_dir, out_dir=args.out_dir, pkl_files=pkl_files)
    else:
        # The original chain silently did nothing for an unrecognised name;
        # fail loudly so typos are caught immediately.
        raise ValueError(f'Unsupported dataset name: {args.dataset}')
autonomous_driving/openlane-v2/tools/update_data_coords.sh
0 → 100644
View file @
305e110f
#!/usr/bin/env bash
# Run tools/update_data_coords.py for one dataset on a Slurm cluster.
# Usage: [GPUS=1] [GPUS_PER_NODE=1] [SRUN_ARGS=...] \
#        ./tools/update_data_coords.sh PARTITION DATASET
set -x
# Put the repository root on PYTHONPATH so the tool can import the package.
export PYTHONPATH=`pwd`:$PYTHONPATH

# Positional arguments.
PARTITION=$1
DATASET=$2
# Cluster sizing, overridable from the environment.
GPUS=${GPUS:-1}
GPUS_PER_NODE=${GPUS_PER_NODE:-1}
SRUN_ARGS=${SRUN_ARGS:-""}
JOB_NAME=update_data_coords

srun -p ${PARTITION} \
    --job-name=${JOB_NAME} \
    --gres=gpu:${GPUS_PER_NODE} \
    --ntasks=${GPUS} \
    --ntasks-per-node=${GPUS_PER_NODE} \
    --kill-on-bad-exit=1 \
    ${SRUN_ARGS} \
    python -u tools/update_data_coords.py ${DATASET} \
    --root-dir ./data/${DATASET} \
    --out-dir ./data/${DATASET}
Prev
1
2
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment