Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
dcnv3
Commits
198ca8f9
Unverified
Commit
198ca8f9
authored
May 12, 2023
by
YeShenglong1
Committed by
GitHub
May 12, 2023
Browse files
Add files via upload
parent
6a31be8f
Changes
9
Show whitespace changes
Inline
Side-by-side
Showing
9 changed files
with
1210 additions
and
0 deletions
+1210
-0
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/dist_test.sh
...ng/Online-HD-Map-Construction-CVPR2023/tools/dist_test.sh
+10
-0
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/dist_train.sh
...g/Online-HD-Map-Construction-CVPR2023/tools/dist_train.sh
+9
-0
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/evaluate_submission.py
...HD-Map-Construction-CVPR2023/tools/evaluate_submission.py
+28
-0
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/mmdet_test.py
...g/Online-HD-Map-Construction-CVPR2023/tools/mmdet_test.py
+190
-0
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/mmdet_train.py
.../Online-HD-Map-Construction-CVPR2023/tools/mmdet_train.py
+170
-0
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/test.py
...driving/Online-HD-Map-Construction-CVPR2023/tools/test.py
+196
-0
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/train.py
...riving/Online-HD-Map-Construction-CVPR2023/tools/train.py
+261
-0
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/visualization/renderer.py
...Map-Construction-CVPR2023/tools/visualization/renderer.py
+222
-0
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/visualization/visualize.py
...ap-Construction-CVPR2023/tools/visualization/visualize.py
+124
-0
No files found.
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/dist_test.sh
0 → 100644
View file @
198ca8f9
#!/usr/bin/env bash
# Distributed testing launcher.
# Usage: ./dist_test.sh CONFIG CHECKPOINT NUM_GPUS [extra args forwarded to test.py]
# PORT may be overridden from the environment (default 29500).
CONFIG=$1
CHECKPOINT=$2
GPUS=$3
PORT=${PORT:-29500}

# Quote "$0" inside the command substitutions so paths containing spaces
# do not word-split (the original left the first dirname unquoted).
PYTHONPATH="$(dirname "$0")/..":$PYTHONPATH \
python -m torch.distributed.launch --nproc_per_node="$GPUS" --master_port="$PORT" \
    "$(dirname "$0")"/test.py "$CONFIG" "$CHECKPOINT" --launcher pytorch "${@:4}"
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/dist_train.sh
0 → 100644
View file @
198ca8f9
#!/usr/bin/env bash
# Distributed training launcher.
# Usage: ./dist_train.sh CONFIG NUM_GPUS [extra args forwarded to train.py]
# PORT may be overridden from the environment (default 29500).
CONFIG=$1
GPUS=$2
PORT=${PORT:-29500}

# Quote "$0" inside the command substitutions so paths containing spaces
# do not word-split (the original left the first dirname unquoted).
PYTHONPATH="$(dirname "$0")/..":$PYTHONPATH \
python -m torch.distributed.launch --nproc_per_node="$GPUS" --master_port="$PORT" \
    "$(dirname "$0")"/train.py "$CONFIG" --launcher pytorch "${@:3}"
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/evaluate_submission.py
0 → 100644
View file @
198ca8f9
import
sys
import
os
sys
.
path
.
append
(
os
.
path
.
abspath
(
'.'
))
from
src.datasets.evaluation.vector_eval
import
VectorEvaluate
import
argparse
def parse_args():
    """Parse the command line: a submission file and a GT annotation file.

    Returns:
        argparse.Namespace: parsed arguments with ``submission`` and ``gt``.
    """
    parser = argparse.ArgumentParser(description='Evaluate a submission file')
    parser.add_argument(
        'submission',
        help='submission file in pickle or json format to be evaluated')
    parser.add_argument('gt', help='gt annotation file')
    return parser.parse_args()
def main(args):
    """Evaluate a submission against ground truth and print the results.

    Args:
        args: parsed CLI namespace with ``gt`` (annotation file path) and
            ``submission`` (submission file path).
    """
    # n_workers=0 keeps the evaluator single-process.
    evaluator = VectorEvaluate(args.gt, n_workers=0)
    metrics = evaluator.evaluate(args.submission)
    print(metrics)
# Script entry point: parse CLI arguments and run the evaluation.
if __name__ == '__main__':
    main(parse_args())
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/mmdet_test.py
0 → 100644
View file @
198ca8f9
import
os.path
as
osp
import
pickle
import
shutil
import
tempfile
import
time
import
mmcv
import
torch
import
torch.distributed
as
dist
from
mmcv.image
import
tensor2imgs
from
mmcv.runner
import
get_dist_info
from
mmdet.core
import
encode_mask_results
def single_gpu_test(model,
                    data_loader,
                    show=False,
                    out_dir=None,
                    show_score_thr=0.3):
    """Run inference over a dataset on a single GPU.

    Args:
        model (nn.Module): wrapped model (e.g. MMDataParallel); called with
            ``return_loss=False, rescale=True``.
        data_loader (DataLoader): loader over the test dataset.
        show (bool): if True, display each prediction via
            ``model.module.show_result``.
        out_dir (str | None): if set, rendered predictions are written under
            this directory (one file per ``ori_filename``).
        show_score_thr (float): score threshold forwarded to ``show_result``.

    Returns:
        list: one result entry per dataset sample (mask results are
        RLE-encoded when the model returns (bbox, mask) tuples).
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    prog_bar = mmcv.ProgressBar(len(dataset))
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)

        batch_size = len(result)
        if show or out_dir:
            # 'img' may be a raw Tensor (batch of 1) or a DataContainer;
            # unwrap accordingly.
            if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
                img_tensor = data['img'][0]
            else:
                img_tensor = data['img'][0].data[0]
            img_metas = data['img_metas'][0].data[0]
            # De-normalize tensors back to displayable images.
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            # NOTE(review): this inner loop reuses `i`, shadowing the outer
            # enumerate index; harmless here since the outer `i` is unused.
            for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
                h, w, _ = img_meta['img_shape']
                # Crop away padding, then resize back to the original shape.
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None

                model.module.show_result(
                    img_show,
                    result[i],
                    show=show,
                    out_file=out_file,
                    score_thr=show_score_thr)

        # encode mask results as RLE to keep memory bounded
        if isinstance(result[0], tuple):
            result = [(bbox_results, encode_mask_results(mask_results))
                      for bbox_results, mask_results in result]
        results.extend(result)

        # Advance the progress bar once per sample in the batch.
        for _ in range(batch_size):
            prog_bar.update()
    return results
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
    """Test model with multiple gpus.

    This method tests model with multiple gpus and collects the results
    under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
    it encodes results to gpu tensors and use gpu communication for results
    collection. On cpu mode it saves the results on different gpus to 'tmpdir'
    and collects them by the rank 0 worker.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.Dataloader): Pytorch data loader.
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode.
        gpu_collect (bool): Option to use either gpu or cpu to collect results.

    Returns:
        list: The prediction results.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    # Only rank 0 draws the progress bar (it advances world_size per batch).
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))
    time.sleep(2)  # This line can prevent deadlock problem in some cases.
    for i, data in enumerate(data_loader):
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
        # Mask encoding is deliberately disabled in this fork:
        # if isinstance(result[0], tuple):
        #     result = [(bbox_results, encode_mask_results(mask_results))
        #               for bbox_results, mask_results in result]
        results.extend(result)

        if rank == 0:
            batch_size = len(result)
            # Assume every rank processed the same batch size this step.
            for _ in range(batch_size * world_size):
                prog_bar.update()

    # collect results from all ranks
    if gpu_collect:
        results = collect_results_gpu(results, len(dataset))
    else:
        results = collect_results_cpu(results, len(dataset), tmpdir)
    return results
def collect_results_cpu(result_part, size, tmpdir=None):
    """Gather per-rank result lists on rank 0 via a shared filesystem.

    Each rank dumps its partial results to ``tmpdir/part_<rank>.pkl``; rank 0
    loads all parts, interleaves them back into dataset order, truncates
    padded samples and removes the tmp dir.

    Args:
        result_part (list): this rank's results.
        size (int): true dataset length (results beyond it are padding).
        tmpdir (str | None): shared directory; when None, rank 0 creates one
            and broadcasts its path to all ranks through a CUDA tensor.

    Returns:
        list | None: ordered results on rank 0, None on all other ranks.
    """
    rank, world_size = get_dist_info()
    # create a tmp dir if it is not specified
    if tmpdir is None:
        MAX_LEN = 512
        # 32 is whitespace; the padding is stripped with rstrip() below
        dir_tensor = torch.full((MAX_LEN, ),
                                32,
                                dtype=torch.uint8,
                                device='cuda')
        if rank == 0:
            mmcv.mkdir_or_exist('.dist_test')
            tmpdir = tempfile.mkdtemp(dir='.dist_test')
            # Encode the path into a fixed-size byte tensor for broadcast.
            tmpdir = torch.tensor(
                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # dump the part result to the dir
    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
    # All ranks must finish writing before rank 0 starts reading.
    dist.barrier()
    # collect all parts
    if rank != 0:
        return None
    else:
        # load results of all parts from tmp dir
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, f'part_{i}.pkl')
            part_list.append(mmcv.load(part_file))
        # sort the results: the sampler deals samples round-robin, so
        # zipping the parts restores the original dataset order
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        # remove tmp dir
        shutil.rmtree(tmpdir)
        return ordered_results
def collect_results_gpu(result_part, size):
    """Gather per-rank result lists on rank 0 via NCCL all_gather.

    Results are pickled into uint8 CUDA tensors, zero-padded to a common
    length, all-gathered, then unpickled and re-interleaved on rank 0.

    Args:
        result_part (list): this rank's results.
        size (int): true dataset length (results beyond it are padding).

    Returns:
        list | None: ordered results on rank 0, None (implicitly) elsewhere.
    """
    rank, world_size = get_dist_info()
    # dump result part to tensor with pickle
    part_tensor = torch.tensor(
        bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
    # gather all result part tensor shape
    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
    shape_list = [shape_tensor.clone() for _ in range(world_size)]
    dist.all_gather(shape_list, shape_tensor)
    # padding result part tensor to max length (all_gather requires
    # identically shaped tensors on every rank)
    shape_max = torch.tensor(shape_list).max()
    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
    part_send[:shape_tensor[0]] = part_tensor
    part_recv_list = [
        part_tensor.new_zeros(shape_max) for _ in range(world_size)
    ]
    # gather all result part
    dist.all_gather(part_recv_list, part_send)

    if rank == 0:
        part_list = []
        for recv, shape in zip(part_recv_list, shape_list):
            # Strip the padding before unpickling.
            part_list.append(
                pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
        # sort the results: zipping the round-robin parts restores order
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        return ordered_results
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/mmdet_train.py
0 → 100644
View file @
198ca8f9
import
random
import
warnings
import
numpy
as
np
import
torch
from
mmcv.parallel
import
MMDataParallel
,
MMDistributedDataParallel
from
mmcv.runner
import
(
HOOKS
,
DistSamplerSeedHook
,
EpochBasedRunner
,
Fp16OptimizerHook
,
OptimizerHook
,
build_optimizer
,
build_runner
)
from
mmcv.utils
import
build_from_cfg
from
mmdet.core
import
DistEvalHook
,
EvalHook
from
mmdet.datasets
import
(
build_dataloader
,
build_dataset
,
replace_ImageToTensor
)
from
mmdet.utils
import
get_root_logger
def set_random_seed(seed, deterministic=False):
    """Set random seed.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    # Seed every RNG source used during training with the same value.
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed,
                    torch.cuda.manual_seed_all):
        seed_fn(seed)
    if deterministic:
        # Trade cuDNN autotuning speed for bit-exact reproducibility.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def train_detector(model,
                   dataset,
                   cfg,
                   distributed=False,
                   validate=False,
                   timestamp=None,
                   meta=None):
    """Build dataloaders, wrap the model, construct a runner with its hooks
    and launch the training workflow.

    Args:
        model (nn.Module): detector to train (moved onto GPU(s) here).
        dataset (Dataset | list[Dataset]): training dataset(s), one loader
            is built per dataset.
        cfg (Config): full training config (data, optimizer, runner, hooks).
        distributed (bool): wrap in MMDistributedDataParallel when True,
            MMDataParallel otherwise.
        validate (bool): register a (Dist)EvalHook over ``cfg.data.val``.
        timestamp (str | None): stamp reused for the runner's log filenames.
        meta (dict | None): extra info stored by the runner.
    """
    logger = get_root_logger(cfg.log_level)

    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    if 'imgs_per_gpu' in cfg.data:
        # Legacy key: keep supporting it but prefer samples_per_gpu.
        logger.warning('"imgs_per_gpu" is deprecated in MMDet V2.0. '
                       'Please use "samples_per_gpu" instead')
        if 'samples_per_gpu' in cfg.data:
            logger.warning(
                f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
                f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
                f'={cfg.data.imgs_per_gpu} is used in this experiments')
        else:
            logger.warning(
                'Automatically set "samples_per_gpu"="imgs_per_gpu"='
                f'{cfg.data.imgs_per_gpu} in this experiments')
        cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu

    data_loaders = [
        build_dataloader(
            ds,
            cfg.data.samples_per_gpu,
            cfg.data.workers_per_gpu,
            # cfg.gpus will be ignored if distributed
            len(cfg.gpu_ids),
            dist=distributed,
            seed=cfg.seed) for ds in dataset
    ]

    # put model on gpus
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
    else:
        model = MMDataParallel(
            model.cuda(cfg.gpu_ids[0]), device_ids=cfg.gpu_ids)

    # build runner
    optimizer = build_optimizer(model, cfg.optimizer)

    if 'runner' not in cfg:
        # Fall back to the legacy total_epochs key.
        cfg.runner = {
            'type': 'EpochBasedRunner',
            'max_epochs': cfg.total_epochs
        }
        warnings.warn(
            'config is now expected to have a `runner` section, '
            'please set `runner` in your config.', UserWarning)
    else:
        if 'total_epochs' in cfg:
            # Both keys present: they must agree.
            assert cfg.total_epochs == cfg.runner.max_epochs

    runner = build_runner(
        cfg.runner,
        default_args=dict(
            model=model,
            optimizer=optimizer,
            work_dir=cfg.work_dir,
            logger=logger,
            meta=meta))

    # an ugly workaround to make .log and .log.json filenames the same
    runner.timestamp = timestamp

    # fp16 setting
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif distributed and 'type' not in cfg.optimizer_config:
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config

    # register hooks
    runner.register_training_hooks(cfg.lr_config, optimizer_config,
                                   cfg.checkpoint_config, cfg.log_config,
                                   cfg.get('momentum_config', None))
    if distributed:
        if isinstance(runner, EpochBasedRunner):
            # Reseed the sampler each epoch so shuffling differs per epoch.
            runner.register_hook(DistSamplerSeedHook())

    # register eval hooks
    if validate:
        # Support batch_size > 1 in validation
        val_samples_per_gpu = cfg.data.val.pop('samples_per_gpu', 1)
        if val_samples_per_gpu > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.val.pipeline = replace_ImageToTensor(
                cfg.data.val.pipeline)
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))
        val_dataloader = build_dataloader(
            val_dataset,
            samples_per_gpu=val_samples_per_gpu,
            workers_per_gpu=cfg.data.workers_per_gpu,
            dist=distributed,
            shuffle=False)
        eval_cfg = cfg.get('evaluation', {})
        eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
        eval_hook = DistEvalHook if distributed else EvalHook
        runner.register_hook(eval_hook(val_dataloader, **eval_cfg))

    # user-defined hooks
    if cfg.get('custom_hooks', None):
        custom_hooks = cfg.custom_hooks
        assert isinstance(custom_hooks, list), \
            f'custom_hooks expect list type, but got {type(custom_hooks)}'
        for hook_cfg in cfg.custom_hooks:
            assert isinstance(hook_cfg, dict), \
                'Each item in custom_hooks expects dict type, but got ' \
                f'{type(hook_cfg)}'
            # Copy so popping 'priority' does not mutate the config.
            hook_cfg = hook_cfg.copy()
            priority = hook_cfg.pop('priority', 'NORMAL')
            hook = build_from_cfg(hook_cfg, HOOKS)
            runner.register_hook(hook, priority=priority)

    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/test.py
0 → 100644
View file @
198ca8f9
import
argparse
import
mmcv
import
os
import
os.path
as
osp
import
torch
import
warnings
from
mmcv
import
Config
,
DictAction
from
mmcv.cnn
import
fuse_conv_bn
from
mmcv.parallel
import
MMDataParallel
,
MMDistributedDataParallel
from
mmcv.runner
import
(
get_dist_info
,
init_dist
,
load_checkpoint
,
wrap_fp16_model
)
from
mmdet3d.apis
import
single_gpu_test
from
mmdet3d.datasets
import
build_dataloader
,
build_dataset
from
mmdet3d.models
import
build_model
from
mmdet_test
import
multi_gpu_test
from
mmdet_train
import
set_random_seed
from
mmdet.datasets
import
replace_ImageToTensor
def parse_args():
    """Build and parse the CLI for model testing/evaluation.

    Also mirrors ``--local_rank`` into the ``LOCAL_RANK`` environment
    variable when it is not already set (expected by torch.distributed).
    """
    parser = argparse.ArgumentParser(
        description='MMDet test (and eval) a model')

    # required positionals
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', type=str, help='checkpoint file')

    # dataset / output locations
    parser.add_argument(
        '--split', type=str, required=True, help='which split to test on')
    parser.add_argument('--work-dir', help='the dir to save logs and models')

    # model tweaks
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')

    # exactly one of these two is expected by main()
    parser.add_argument(
        '--format-only',
        action='store_true',
        help='Format the output results without perform evaluation. It is'
        'useful when you want to format the result to a specific format and '
        'submit it to the test server')
    parser.add_argument(
        '--eval', action='store_true', help='whether to run evaluation.')

    # distributed result collection
    parser.add_argument(
        '--gpu-collect',
        action='store_true',
        help='whether to use gpu to collect results.')
    parser.add_argument(
        '--tmpdir',
        help='tmp directory used for collecting results from multiple '
        'workers, available when gpu-collect is not specified')

    # reproducibility
    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')

    # launcher plumbing
    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)

    parsed = parser.parse_args()
    # Export the rank for libraries that read it from the environment.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))
    return parsed
def main():
    """Entry point for testing: validate CLI flags, load the config, build
    dataset/model, run (multi-)GPU inference and format or evaluate results.
    """
    args = parse_args()

    if args.split not in ['val', 'test']:
        raise ValueError('Please choose "val" or "test" split for testing')
    # Exactly one of --eval / --format-only must be given.
    if (args.eval and args.format_only) or (
            not args.eval and not args.format_only):
        raise ValueError('Please specify exactly one operation (eval/format) '
                         'with the argument "--eval" or "--format-only"')
    # Test split has no GT annotations available locally.
    if args.eval and args.split == 'test':
        raise ValueError('Cannot evaluate on test set')

    cfg = Config.fromfile(args.config)

    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # import modules from plguin/xx, registry will be updated
    import sys
    sys.path.append(os.path.abspath('.'))
    if hasattr(cfg, 'plugin'):
        if cfg.plugin:
            import importlib
            if hasattr(cfg, 'plugin_dir'):
                def import_path(plugin_dir):
                    # Turn a filesystem path like "a/b/c/" into the dotted
                    # module path "a.b" and import it for its side effects
                    # (registry registration).
                    _module_dir = os.path.dirname(plugin_dir)
                    _module_dir = _module_dir.split('/')
                    _module_path = _module_dir[0]
                    for m in _module_dir[1:]:
                        _module_path = _module_path + '.' + m
                    print(f'importing {_module_path}/')
                    plg_lib = importlib.import_module(_module_path)

                plugin_dirs = cfg.plugin_dir
                if not isinstance(plugin_dirs, list):
                    plugin_dirs = [plugin_dirs, ]
                for plugin_dir in plugin_dirs:
                    import_path(plugin_dir)
            else:
                # import dir is the dirpath for the config file
                _module_dir = os.path.dirname(args.config)
                _module_dir = _module_dir.split('/')
                _module_path = _module_dir[0]
                for m in _module_dir[1:]:
                    _module_path = _module_path + '.' + m
                print(f'importing {_module_path}/')
                plg_lib = importlib.import_module(_module_path)

    # Pick the dataset config for the requested split.
    cfg_data_dict = cfg.data.get(args.split)
    cfg.model.pretrained = None

    # in case the test dataset is concatenated
    samples_per_gpu = 1
    cfg_data_dict.test_mode = True
    samples_per_gpu = cfg_data_dict.pop('samples_per_gpu', 1)
    if samples_per_gpu > 1:
        # Replace 'ImageToTensor' to 'DefaultFormatBundle'
        cfg_data_dict.pipeline = replace_ImageToTensor(cfg_data_dict.pipeline)

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)

    # set random seeds
    if args.seed is not None:
        set_random_seed(args.seed, deterministic=args.deterministic)

    # build the dataloader
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    cfg_data_dict.work_dir = cfg.work_dir
    print('work_dir: ', cfg.work_dir)

    dataset = build_dataset(cfg_data_dict)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=samples_per_gpu,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=distributed,
        shuffle=False)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_model(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_conv_bn(model)

    if not distributed:
        model = MMDataParallel(model, device_ids=[0])
        outputs = single_gpu_test(model, data_loader)
    else:
        model = MMDistributedDataParallel(
            model.cuda(),
            device_ids=[torch.cuda.current_device()],
            broadcast_buffers=False)
        outputs = multi_gpu_test(model, data_loader, args.tmpdir,
                                 args.gpu_collect)

    # Only rank 0 holds the gathered results.
    rank, _ = get_dist_info()
    if rank == 0:
        if args.format_only:
            dataset.format_results(outputs, prefix=cfg.work_dir)
        elif args.eval:
            print('start evaluation!')
            print(dataset.evaluate(outputs))
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/train.py
0 → 100644
View file @
198ca8f9
from
__future__
import
division
import
argparse
import
copy
import
mmcv
import
os
import
time
import
torch
import
warnings
from
mmcv
import
Config
,
DictAction
from
mmcv.runner
import
get_dist_info
,
init_dist
from
os
import
path
as
osp
from
mmdet
import
__version__
as
mmdet_version
from
mmdet3d
import
__version__
as
mmdet3d_version
from
mmdet3d.apis
import
train_model
from
mmdet3d.datasets
import
build_dataset
from
mmdet3d.utils
import
collect_env
,
get_root_logger
from
mmseg
import
__version__
as
mmseg_version
# warper
from
mmdet_train
import
set_random_seed
# from builder import build_model
from
mmdet3d.models
import
build_model
def parse_args():
    """Build and parse the training CLI.

    Side effects: exports ``LOCAL_RANK`` when absent from the environment,
    and folds the deprecated ``--options`` into ``--cfg-options``.
    """
    parser = argparse.ArgumentParser(description='Train a detector')

    parser.add_argument('config', help='train config file path')
    parser.add_argument('--work-dir', help='the dir to save logs and models')
    parser.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    parser.add_argument(
        '--no-validate',
        action='store_true',
        help='whether not to evaluate the checkpoint during training')

    # --gpus and --gpu-ids are mutually exclusive ways to pick devices
    group_gpus = parser.add_mutually_exclusive_group()
    group_gpus.add_argument(
        '--gpus',
        type=int,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    group_gpus.add_argument(
        '--gpu-ids',
        type=int,
        nargs='+',
        help='ids of gpus to use '
        '(only applicable to non-distributed training)')

    parser.add_argument('--seed', type=int, default=0, help='random seed')
    parser.add_argument(
        '--deterministic',
        action='store_true',
        help='whether to set deterministic options for CUDNN backend.')

    # config-override flags (--options is the deprecated spelling)
    parser.add_argument(
        '--options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file (deprecate), '
        'change to --cfg-options instead.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')

    parser.add_argument(
        '--launcher',
        choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none',
        help='job launcher')
    parser.add_argument('--local_rank', type=int, default=0)
    parser.add_argument(
        '--autoscale-lr',
        action='store_true',
        help='automatically scale lr with the number of gpus')

    parsed = parser.parse_args()
    # Export the rank for libraries that read it from the environment.
    os.environ.setdefault('LOCAL_RANK', str(parsed.local_rank))

    if parsed.options and parsed.cfg_options:
        raise ValueError(
            '--options and --cfg-options cannot be both specified, '
            '--options is deprecated in favor of --cfg-options')
    if parsed.options:
        warnings.warn('--options is deprecated in favor of --cfg-options')
        parsed.cfg_options = parsed.options

    return parsed
def main():
    """Entry point for training: load config, set up environment/logging,
    build model and datasets, then hand off to ``train_model``.
    """
    args = parse_args()

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # import modules from string list.
    if cfg.get('custom_imports', None):
        from mmcv.utils import import_modules_from_strings
        import_modules_from_strings(**cfg['custom_imports'])

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True

    # import modules, registry will be updated
    import sys
    sys.path.append(os.path.abspath('.'))
    if hasattr(cfg, 'plugin'):
        if cfg.plugin:
            import importlib
            if hasattr(cfg, 'plugin_dir'):
                def import_path(plugin_dir):
                    # Convert a path like "a/b/c/" to module path "a.b" and
                    # import it so its registries are populated.
                    _module_dir = os.path.dirname(plugin_dir)
                    _module_dir = _module_dir.split('/')
                    _module_path = _module_dir[0]
                    for m in _module_dir[1:]:
                        _module_path = _module_path + '.' + m
                    print(f'importing {_module_path}/')
                    plg_lib = importlib.import_module(_module_path)

                plugin_dirs = cfg.plugin_dir
                if not isinstance(plugin_dirs, list):
                    plugin_dirs = [plugin_dirs, ]
                for plugin_dir in plugin_dirs:
                    import_path(plugin_dir)
            else:
                # import dir is the dirpath for the config file
                _module_dir = os.path.dirname(args.config)
                _module_dir = _module_dir.split('/')
                _module_path = _module_dir[0]
                for m in _module_dir[1:]:
                    _module_path = _module_path + '.' + m
                print(f'importing {_module_path}/')
                plg_lib = importlib.import_module(_module_path)

    # work_dir is determined in this priority: CLI > segment in file > filename
    if args.work_dir is not None:
        # update configs according to CLI args if args.work_dir is not None
        cfg.work_dir = args.work_dir
    elif cfg.get('work_dir', None) is None:
        # use config filename as default work_dir if cfg.work_dir is None
        cfg.work_dir = osp.join('./work_dirs',
                                osp.splitext(osp.basename(args.config))[0])
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.gpu_ids is not None:
        cfg.gpu_ids = args.gpu_ids
    else:
        cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)

    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * len(cfg.gpu_ids) / 8

    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
        # re-set gpu_ids with distributed training mode
        _, world_size = get_dist_info()
        cfg.gpu_ids = range(world_size)

    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # dump config
    cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    # specify logger name, if we still use 'mmdet', the output info will be
    # filtered and won't be saved in the log_file
    # TODO: ugly workaround to judge whether we are training det or seg model
    if cfg.model.type in ['EncoderDecoder3D']:
        logger_name = 'mmseg'
    else:
        logger_name = 'mmdet'
    logger = get_root_logger(
        log_file=log_file, log_level=cfg.log_level, name=logger_name)

    # init the meta dict to record some important information such as
    # environment info and seed, which will be logged
    meta = dict()
    # log env info
    env_info_dict = collect_env()
    env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    meta['env_info'] = env_info
    meta['config'] = cfg.pretty_text

    # log some basic info
    logger.info(f'Distributed training: {distributed}')
    logger.info(f'Config:\n{cfg.pretty_text}')

    # set random seeds
    if args.seed is not None:
        logger.info(f'Set random seed to {args.seed}, '
                    f'deterministic: {args.deterministic}')
        set_random_seed(args.seed, deterministic=args.deterministic)
    cfg.seed = args.seed
    meta['seed'] = args.seed
    meta['exp_name'] = osp.basename(args.config)

    model = build_model(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    model.init_weights()
    logger.info(f'Model:\n{model}')

    # Datasets need the work_dir (presumably for cached artifacts —
    # set on both train and val configs).
    cfg.data.train.work_dir = cfg.work_dir
    cfg.data.val.work_dir = cfg.work_dir
    datasets = [build_dataset(cfg.data.train)]
    if len(cfg.workflow) == 2:
        # Workflow includes a val phase: build the val dataset with the
        # training pipeline.
        val_dataset = copy.deepcopy(cfg.data.val)
        # in case we use a dataset wrapper
        if 'dataset' in cfg.data.train:
            val_dataset.pipeline = cfg.data.train.dataset.pipeline
        else:
            val_dataset.pipeline = cfg.data.train.pipeline
        # set test_mode=False here in deep copied config
        # which do not affect AP/AR calculation later
        # refer to https://mmdetection3d.readthedocs.io/en/latest/tutorials/customize_runtime.html#customize-workflow # noqa
        val_dataset.test_mode = False
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save mmdet version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmdet_version=mmdet_version,
            mmseg_version=mmseg_version,
            mmdet3d_version=mmdet3d_version,
            config=cfg.pretty_text,
            CLASSES=None,
            PALETTE=datasets[0].PALETTE  # for segmentors
            if hasattr(datasets[0], 'PALETTE') else None)
    # add an attribute for visualization convenience
    # model.CLASSES = datasets[0].CLASSES
    train_model(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/visualization/renderer.py
0 → 100644
View file @
198ca8f9
import
os.path
as
osp
import
os
import
numpy
as
np
import
copy
import
cv2
import
matplotlib.pyplot
as
plt
from
PIL
import
Image
from
shapely.geometry
import
LineString
def remove_nan_values(uv):
    """Drop rows whose u (column 0) or v (column 1) coordinate is NaN.

    Args:
        uv (np.ndarray): points of shape (N, >=2); only the first two
            columns are checked.

    Returns:
        np.ndarray: the rows of ``uv`` where both u and v are not NaN.
    """
    keep = ~(np.isnan(uv[:, 0]) | np.isnan(uv[:, 1]))
    return uv[keep]
def points_ego2img(pts_ego, extrinsics, intrinsics):
    """Project ego-frame 3D points into image pixel coordinates.

    Args:
        pts_ego (np.ndarray): (N, 3) points in the ego frame.
        extrinsics (np.ndarray): 4x4 ego-to-camera transform.
        intrinsics (np.ndarray): camera intrinsic matrix applied to the
            camera-frame xyz rows.

    Returns:
        tuple: ``(uv, depth)`` where ``uv`` is (M, 2) pixel coordinates
        after perspective division and ``depth`` is the (M,) z values;
        rows with NaN in the first two columns are removed beforehand.
    """
    ones = np.ones([len(pts_ego), 1])
    pts_ego_4d = np.concatenate([pts_ego, ones], axis=-1)
    pts_cam = extrinsics @ pts_ego_4d.T
    uvd = (intrinsics @ pts_cam[:3, :]).T
    uvd = remove_nan_values(uvd)
    depth = uvd[:, 2]
    # Perspective division: normalize by depth to get pixel coordinates.
    uv = uvd[:, :2] / uvd[:, 2].reshape(-1, 1)
    return uv, depth
def interp_fixed_dist(line, sample_dist):
    ''' Interpolate a line at fixed interval.

    Args:
        line (LineString): line
        sample_dist (float): sample interval

    Returns:
        points (array): interpolated points, shape (N, 2)
    '''
    # Interior sample distances; endpoints are added explicitly below so
    # that at least two points are returned even when
    # sample_dist > line.length.
    interior = list(np.arange(sample_dist, line.length, sample_dist))
    distances = [0] + interior + [line.length]

    coords = [list(line.interpolate(d).coords) for d in distances]
    return np.array(coords).squeeze()
def draw_polyline_ego_on_img(polyline_ego, img_bgr, extrinsics, intrinsics,
                             color_bgr, thickness):
    """Project an ego-frame polyline onto a camera image and draw it.

    The polyline is densified at a fixed 0.2 m interval, projected with
    the given camera parameters, and only the in-image, in-front-of-camera
    runs of points are rendered (each contiguous visible run becomes its
    own polyline).

    Args:
        polyline_ego (np.ndarray): polyline in ego frame, shape (N, 2)
            or (N, 3); z=0 is assumed for 2D input.
        img_bgr (np.ndarray): BGR image of shape (H, W, 3), drawn in place.
        extrinsics (np.ndarray): ego-to-camera transform, shape (4, 4).
        intrinsics (np.ndarray): camera intrinsics, shape (3, 3).
        color_bgr (tuple): BGR color to draw with.
        thickness (int): line thickness in pixels.
    """
    # if 2-dimension, assume z=0
    if polyline_ego.shape[1] == 2:
        zeros = np.zeros((polyline_ego.shape[0], 1))
        polyline_ego = np.concatenate([polyline_ego, zeros], axis=1)

    polyline_ego = interp_fixed_dist(line=LineString(polyline_ego),
                                     sample_dist=0.2)

    uv, depth = points_ego2img(polyline_ego, extrinsics, intrinsics)

    h, w, c = img_bgr.shape

    is_valid_x = np.logical_and(0 <= uv[:, 0], uv[:, 0] < w - 1)
    is_valid_y = np.logical_and(0 <= uv[:, 1], uv[:, 1] < h - 1)
    is_valid_z = depth > 0
    is_valid_points = np.logical_and.reduce(
        [is_valid_x, is_valid_y, is_valid_z])

    if is_valid_points.sum() == 0:
        return

    def _flush_segment(segment_pts):
        # Draw one contiguous run of visible points as a polyline.
        segment = np.round(np.stack(segment_pts)).astype(np.int32)
        draw_visible_polyline_cv2(
            copy.deepcopy(segment),
            # BUG FIX: the mask was sized len(uv) instead of the segment
            # length; it only worked because it was all-True and oversized.
            valid_pts_bool=np.ones((len(segment), 1), dtype=bool),
            image=img_bgr,
            color=color_bgr,
            thickness_px=thickness,
        )

    # Split the projected points into contiguous visible runs and draw
    # each run (length >= 2) independently.
    tmp_list = []
    for point, valid in zip(uv, is_valid_points):
        if valid:
            tmp_list.append(point)
        else:
            if len(tmp_list) >= 2:
                _flush_segment(tmp_list)
            tmp_list = []
    if len(tmp_list) >= 2:
        _flush_segment(tmp_list)
def draw_visible_polyline_cv2(line, valid_pts_bool, image, color, thickness_px):
    """Draw a polyline onto an image using given line segments.

    Args:
        line: Array of shape (K, 2) representing the coordinates of line.
        valid_pts_bool: Array of shape (K,) representing which polyline
            coordinates are valid for rendering. For example, if the
            coordinate is occluded, a user might specify that it is
            invalid. Line segments touching an invalid vertex will not
            be rendered.
        image: Array of shape (H, W, 3), representing a 3-channel BGR image
        color: Tuple of shape (3,) with a BGR format color
        thickness_px: thickness (in pixels) to use when rendering the
            polyline.
    """
    pts = np.round(line).astype(int)  # type: ignore

    for idx in range(len(pts) - 1):
        # Skip segments touching an invalid endpoint.
        if (not valid_pts_bool[idx]) or (not valid_pts_bool[idx + 1]):
            continue

        start = (pts[idx][0], pts[idx][1])
        end = (pts[idx + 1][0], pts[idx + 1][1])

        # Use anti-aliasing (AA) for curves
        image = cv2.line(
            image,
            pt1=start,
            pt2=end,
            color=color,
            thickness=thickness_px,
            lineType=cv2.LINE_AA,
        )
# Per-category draw colors for OpenCV (camera-view) rendering.
COLOR_MAPS_BGR = {
    # bgr colors
    'divider': (0, 0, 255),
    'boundary': (0, 255, 0),
    'ped_crossing': (255, 0, 0),
    'centerline': (51, 183, 255),
    'drivable_area': (171, 255, 255)
}

# Per-category matplotlib colors for BEV rendering.
COLOR_MAPS_PLT = {
    'divider': 'r',
    'boundary': 'g',
    'ped_crossing': 'b',
    'centerline': 'orange',
    'drivable_area': 'y',
}

# Argoverse 2 ring-camera names; order must match the order of
# imgs/extrinsics/intrinsics passed to the Renderer.
CAM_NAMES_AV2 = [
    'ring_front_center',
    'ring_front_right',
    'ring_front_left',
    'ring_rear_right',
    'ring_rear_left',
    'ring_side_right',
    'ring_side_left',
]
class Renderer(object):
    """Render map elements on BEV and on camera image views.

    Args:
        roi_size (tuple): bev range
    """

    def __init__(self, roi_size):
        self.roi_size = roi_size

    def render_bev_from_vectors(self, vectors, out_dir):
        '''Plot vectorized map elements on BEV.

        Args:
            vectors (dict): dict of vectorized map elements.
            out_dir (str): output directory
        '''
        ego_icon = Image.open('resources/images/car.png')
        map_path = os.path.join(out_dir, 'map.jpg')

        half_x = self.roi_size[0] / 2
        half_y = self.roi_size[1] / 2
        plt.figure(figsize=(self.roi_size[0], self.roi_size[1]))
        plt.xlim(-half_x - 1, half_x + 1)
        plt.ylim(-half_y - 1, half_y + 1)
        plt.axis('off')
        plt.imshow(ego_icon, extent=[-1.5, 1.5, -1.2, 1.2])

        for cat, vector_list in vectors.items():
            color = COLOR_MAPS_PLT[cat]
            for vector in vector_list:
                # Only x/y are plotted even if the vector carries z.
                pts = np.array(vector)[:, :2]
                plt.plot(pts[:, 0], pts[:, 1], color=color, linewidth=5,
                         marker='o', linestyle='-', markersize=20)

        plt.savefig(map_path, bbox_inches='tight', dpi=40)
        plt.close()

    def render_camera_views_from_vectors(self, vectors, imgs, extrinsics,
                                         intrinsics, thickness, out_dir):
        '''Project vectorized map elements to camera views.

        Args:
            vectors (dict): dict of vectorized map elements.
            imgs (tensor): images in bgr color.
            extrinsics (array): ego2img extrinsics, shape (4, 4)
            intrinsics (array): intrinsics, shape (3, 3)
            thickness (int): thickness of lines to draw on images.
            out_dir (str): output directory
        '''
        cameras = zip(imgs, extrinsics, intrinsics)
        for cam_idx, (img, extrinsic, intrinsic) in enumerate(cameras):
            canvas = copy.deepcopy(img)

            for cat, vector_list in vectors.items():
                color = COLOR_MAPS_BGR[cat]
                for vector in vector_list:
                    # cv2 drawing requires a contiguous buffer.
                    canvas = np.ascontiguousarray(canvas)
                    polyline = np.array(vector)
                    if polyline.shape[1] > 3:
                        polyline = polyline[:, :3]
                    draw_polyline_ego_on_img(polyline, canvas, extrinsic,
                                             intrinsic, color, thickness)

            out_path = osp.join(out_dir, CAM_NAMES_AV2[cam_idx]) + '.jpg'
            cv2.imwrite(out_path, canvas)
autonomous_driving/Online-HD-Map-Construction-CVPR2023/tools/visualization/visualize.py
0 → 100644
View file @
198ca8f9
import
argparse
import
mmcv
from
mmcv
import
Config
import
os
from
renderer
import
Renderer
# Map element category name -> integer class id used in prediction files.
CAT2ID = {
    'ped_crossing': 0,
    'divider': 1,
    'boundary': 2,
}

# Inverse mapping: class id -> category name.
ID2CAT = {v: k for k, v in CAT2ID.items()}

# BEV region-of-interest extent (x, y) — presumably meters; TODO confirm.
ROI_SIZE = (60, 30)
def parse_args():
    """Build and evaluate the CLI argument parser for visualization.

    Returns:
        argparse.Namespace: parsed command-line arguments.
    """
    parser = argparse.ArgumentParser(
        description='Visualize groundtruth and results')
    parser.add_argument(
        'log_id', type=str, help='log_id of data to visualize')
    parser.add_argument(
        'ann_file', help='gt file to visualize')
    parser.add_argument(
        '--result', type=str, help='prediction result to visualize')
    parser.add_argument(
        '--thr', type=float, default=0,
        help='score threshold to filter predictions')
    parser.add_argument(
        '--out-dir', default='demo',
        help='directory where visualize results will be saved')
    return parser.parse_args()
def import_plugin(cfg):
    '''
    Import plugin modules named by the config so their registry entries
    (datasets, models, ...) become available.

    Args:
        cfg: config object that may carry ``plugin`` (bool) and
            ``plugin_dir`` (str or list of str) attributes. When
            ``plugin`` is truthy but ``plugin_dir`` is absent, the
            directory of the config file itself is imported.
    '''
    import sys
    sys.path.append(os.path.abspath('.'))
    if hasattr(cfg, 'plugin'):
        if cfg.plugin:
            import importlib

            def import_path(plugin_dir):
                # Turn a path like 'plugin/models/x.py' into the dotted
                # module path of its directory ('plugin.models') and
                # import it.
                _module_dir = os.path.dirname(plugin_dir)
                _module_dir = _module_dir.split('/')
                _module_path = _module_dir[0]
                for m in _module_dir[1:]:
                    _module_path = _module_path + '.' + m
                print(f'importing {_module_path}/')
                plg_lib = importlib.import_module(_module_path)

            if hasattr(cfg, 'plugin_dir'):
                plugin_dirs = cfg.plugin_dir
                if not isinstance(plugin_dirs, list):
                    plugin_dirs = [plugin_dirs, ]
                for plugin_dir in plugin_dirs:
                    import_path(plugin_dir)
            else:
                # BUG FIX: the original referenced ``args.config`` here,
                # but ``args`` is not defined in this module (code copied
                # from train.py), causing a NameError. Fall back to the
                # config's own file path; mmcv's Config exposes it as
                # ``.filename``. Behavior (import the dirname of the
                # config path) is otherwise unchanged.
                import_path(cfg.filename)
def main(args):
    """Render ground truth (and optionally predictions) for one log.

    For every frame of the chosen log this writes a BEV plot plus one
    projected image per camera under
    ``<out_dir>/<log_id>/<timestamp>/{gt,pred}``.
    """
    log_id = args.log_id
    ann = mmcv.load(args.ann_file)
    root_path = os.path.dirname(args.ann_file)
    out_dir = os.path.join(args.out_dir, str(log_id))
    log_ann = ann[log_id]
    renderer = Renderer(roi_size=ROI_SIZE)

    if args.result:
        result = mmcv.load(args.result)['results']

    for frame in mmcv.track_iter_progress(log_ann):
        timestamp = frame['timestamp']
        sensor = frame['sensor']
        annotation = frame['annotation']

        cams = list(sensor.values())
        imgs = [mmcv.imread(
                    os.path.join(root_path, 'argoverse2', cam['image_path']))
                for cam in cams]
        extrinsics = [cam['extrinsic'] for cam in cams]
        intrinsics = [cam['intrinsic'] for cam in cams]

        # NOTE(review): timestamp is used directly as a path component —
        # assumes it is already a str; TODO confirm against the ann file.
        gt_dir = os.path.join(out_dir, timestamp, 'gt')
        os.makedirs(gt_dir, exist_ok=True)
        renderer.render_bev_from_vectors(annotation, out_dir=gt_dir)
        renderer.render_camera_views_from_vectors(
            annotation, imgs, extrinsics, intrinsics, 4, gt_dir)

        if args.result:
            pred = frame_pred = result[timestamp]
            # Group predicted vectors by category, keeping only those
            # above the score threshold.
            vectors = {cat: [] for cat in CAT2ID.keys()}
            n_pred = len(frame_pred['labels'])
            for idx in range(n_pred):
                if frame_pred['scores'][idx] > args.thr:
                    cat = ID2CAT[frame_pred['labels'][idx]]
                    vectors[cat].append(frame_pred['vectors'][idx])

            pred_dir = os.path.join(out_dir, timestamp, 'pred')
            os.makedirs(pred_dir, exist_ok=True)
            renderer.render_bev_from_vectors(vectors, out_dir=pred_dir)
            renderer.render_camera_views_from_vectors(
                vectors, imgs, extrinsics, intrinsics, 4, pred_dir)
# Script entry point: parse CLI args and render the requested log.
if __name__ == '__main__':
    args = parse_args()
    main(args)
\ No newline at end of file
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment