Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
dcnv3
Commits
305e110f
Commit
305e110f
authored
Jul 18, 2023
by
yeshenglong1
Browse files
update tools
parent
631a5159
Changes
37
Expand all
Hide whitespace changes
Inline
Side-by-side
Showing
20 changed files
with
5436 additions
and
0 deletions
+5436
-0
autonomous_driving/openlane-v2/tools/analysis_tools/analyze_logs.py
..._driving/openlane-v2/tools/analysis_tools/analyze_logs.py
+202
-0
autonomous_driving/openlane-v2/tools/analysis_tools/benchmark.py
...ous_driving/openlane-v2/tools/analysis_tools/benchmark.py
+96
-0
autonomous_driving/openlane-v2/tools/analysis_tools/get_flops.py
...ous_driving/openlane-v2/tools/analysis_tools/get_flops.py
+92
-0
autonomous_driving/openlane-v2/tools/create_data.py
autonomous_driving/openlane-v2/tools/create_data.py
+313
-0
autonomous_driving/openlane-v2/tools/create_data.sh
autonomous_driving/openlane-v2/tools/create_data.sh
+24
-0
autonomous_driving/openlane-v2/tools/data_converter/__init__.py
...mous_driving/openlane-v2/tools/data_converter/__init__.py
+1
-0
autonomous_driving/openlane-v2/tools/data_converter/create_gt_database.py
...ng/openlane-v2/tools/data_converter/create_gt_database.py
+624
-0
autonomous_driving/openlane-v2/tools/data_converter/indoor_converter.py
...ving/openlane-v2/tools/data_converter/indoor_converter.py
+121
-0
autonomous_driving/openlane-v2/tools/data_converter/kitti_converter.py
...iving/openlane-v2/tools/data_converter/kitti_converter.py
+624
-0
autonomous_driving/openlane-v2/tools/data_converter/kitti_data_utils.py
...ving/openlane-v2/tools/data_converter/kitti_data_utils.py
+621
-0
autonomous_driving/openlane-v2/tools/data_converter/lyft_converter.py
...riving/openlane-v2/tools/data_converter/lyft_converter.py
+271
-0
autonomous_driving/openlane-v2/tools/data_converter/lyft_data_fixer.py
...iving/openlane-v2/tools/data_converter/lyft_data_fixer.py
+39
-0
autonomous_driving/openlane-v2/tools/data_converter/nuimage_converter.py
...ing/openlane-v2/tools/data_converter/nuimage_converter.py
+226
-0
autonomous_driving/openlane-v2/tools/data_converter/nuscenes_converter.py
...ng/openlane-v2/tools/data_converter/nuscenes_converter.py
+628
-0
autonomous_driving/openlane-v2/tools/data_converter/s3dis_data_utils.py
...ving/openlane-v2/tools/data_converter/s3dis_data_utils.py
+245
-0
autonomous_driving/openlane-v2/tools/data_converter/scannet_data_utils.py
...ng/openlane-v2/tools/data_converter/scannet_data_utils.py
+297
-0
autonomous_driving/openlane-v2/tools/data_converter/sunrgbd_data_utils.py
...ng/openlane-v2/tools/data_converter/sunrgbd_data_utils.py
+223
-0
autonomous_driving/openlane-v2/tools/data_converter/waymo_converter.py
...iving/openlane-v2/tools/data_converter/waymo_converter.py
+558
-0
autonomous_driving/openlane-v2/tools/deployment/mmdet3d2torchserve.py
...riving/openlane-v2/tools/deployment/mmdet3d2torchserve.py
+111
-0
autonomous_driving/openlane-v2/tools/deployment/mmdet3d_handler.py
...s_driving/openlane-v2/tools/deployment/mmdet3d_handler.py
+120
-0
No files found.
autonomous_driving/openlane-v2/tools/analysis_tools/analyze_logs.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
import
json
from
collections
import
defaultdict
import
numpy
as
np
import
seaborn
as
sns
from
matplotlib
import
pyplot
as
plt
def cal_train_time(log_dicts, args):
    """Print per-epoch timing statistics for each parsed training log.

    Args:
        log_dicts (list[dict]): One dict per json log; keys are epoch
            numbers, values map metric names to lists of per-iter values.
        args (argparse.Namespace): Must provide ``json_logs`` and
            ``include_outliers``.
    """
    for idx, log_dict in enumerate(log_dicts):
        print(f'{"-" * 5}Analyze train time of {args.json_logs[idx]}{"-" * 5}')
        # The first iteration of every epoch is usually an outlier
        # (dataloader warm-up); drop it unless explicitly requested.
        if args.include_outliers:
            per_epoch_times = [log_dict[ep]['time'] for ep in log_dict.keys()]
        else:
            per_epoch_times = [
                log_dict[ep]['time'][1:] for ep in log_dict.keys()
            ]
        all_times = np.array(per_epoch_times)
        epoch_ave_time = all_times.mean(-1)
        slowest_epoch = epoch_ave_time.argmax()
        fastest_epoch = epoch_ave_time.argmin()
        std_over_epoch = epoch_ave_time.std()
        print(f'slowest epoch {slowest_epoch + 1}, '
              f'average time is {epoch_ave_time[slowest_epoch]:.4f}')
        print(f'fastest epoch {fastest_epoch + 1}, '
              f'average time is {epoch_ave_time[fastest_epoch]:.4f}')
        print(f'time std over epochs is {std_over_epoch:.4f}')
        print(f'average iter time: {np.mean(all_times):.4f} s/iter')
        print()
def plot_curve(log_dicts, args):
    """Plot the requested metrics from the parsed logs with matplotlib.

    In ``eval`` mode the x-axis is epochs (one point per eval interval);
    otherwise the x-axis is the global iteration index.
    """
    if args.backend is not None:
        plt.switch_backend(args.backend)
    sns.set_style(args.style)
    # When no legend is given, derive one as '<json_log>_<metric>'.
    legend = args.legend
    if legend is None:
        legend = [
            f'{json_log}_{metric}' for json_log in args.json_logs
            for metric in args.keys
        ]
    assert len(legend) == (len(args.json_logs) * len(args.keys))
    metrics = args.keys
    num_metrics = len(metrics)

    for i, log_dict in enumerate(log_dicts):
        epochs = list(log_dict.keys())
        for j, metric in enumerate(metrics):
            print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
            if metric not in log_dict[epochs[args.interval - 1]]:
                raise KeyError(
                    f'{args.json_logs[i]} does not contain metric {metric}')

            if args.mode == 'eval':
                if min(epochs) == args.interval:
                    x0 = args.interval
                else:
                    # A run resumed from a checkpoint is missing its early
                    # epochs, so `xs` must start according to `min(epochs)`.
                    if min(epochs) % args.interval == 0:
                        x0 = min(epochs)
                    else:
                        # First epoch on which eval actually ran.
                        x0 = min(epochs) + args.interval - \
                            min(epochs) % args.interval
                xs = np.arange(x0, max(epochs) + 1, args.interval)
                ys = []
                for epoch in epochs[args.interval - 1::args.interval]:
                    ys += log_dict[epoch][metric]
                # If training aborted before the eval of the last epoch,
                # `xs` and `ys` would differ in length; drop the last x
                # when the final metric list is empty.
                if not log_dict[epoch][metric]:
                    xs = xs[:-1]
                ax = plt.gca()
                ax.set_xticks(xs)
                plt.xlabel('epoch')
                plt.plot(
                    xs, ys, label=legend[i * num_metrics + j], marker='o')
            else:
                xs = []
                ys = []
                num_iters_per_epoch = \
                    log_dict[epochs[args.interval - 1]]['iter'][-1]
                for epoch in epochs[args.interval - 1::args.interval]:
                    iters = log_dict[epoch]['iter']
                    # Drop the trailing val iteration so iter counts align
                    # with the training iterations only.
                    if log_dict[epoch]['mode'][-1] == 'val':
                        iters = iters[:-1]
                    xs.append(
                        np.array(iters) + (epoch - 1) * num_iters_per_epoch)
                    ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
                xs = np.concatenate(xs)
                ys = np.concatenate(ys)
                plt.xlabel('iter')
                plt.plot(
                    xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)
            plt.legend()
        if args.title is not None:
            plt.title(args.title)
    if args.out is None:
        plt.show()
    else:
        print(f'save curve to: {args.out}')
        plt.savefig(args.out)
        plt.cla()
def add_plot_parser(subparsers):
    """Register the ``plot_curve`` sub-command on *subparsers*."""
    parser_plt = subparsers.add_parser(
        'plot_curve', help='parser for plotting curves')
    parser_plt.add_argument(
        'json_logs',
        type=str,
        nargs='+',
        help='path of train log in json format')
    parser_plt.add_argument(
        '--keys',
        type=str,
        nargs='+',
        default=['mAP_0.25'],
        help='the metric that you want to plot')
    parser_plt.add_argument('--title', type=str, help='title of figure')
    parser_plt.add_argument(
        '--legend',
        type=str,
        nargs='+',
        default=None,
        help='legend of each plot')
    parser_plt.add_argument(
        '--backend', type=str, default=None, help='backend of plt')
    parser_plt.add_argument(
        '--style', type=str, default='dark', help='style of plt')
    parser_plt.add_argument('--out', type=str, default=None)
    parser_plt.add_argument('--mode', type=str, default='train')
    parser_plt.add_argument('--interval', type=int, default=1)
def add_time_parser(subparsers):
    """Register the ``cal_train_time`` sub-command on *subparsers*."""
    parser_time = subparsers.add_parser(
        'cal_train_time',
        help='parser for computing the average time per training iteration')
    parser_time.add_argument(
        'json_logs',
        type=str,
        nargs='+',
        help='path of train log in json format')
    parser_time.add_argument(
        '--include-outliers',
        action='store_true',
        help='include the first value of every epoch when computing '
        'the average time')
def parse_args():
    """Build the top-level CLI: one sub-command per analysis task."""
    parser = argparse.ArgumentParser(description='Analyze Json Log')
    # currently only support plot curve and calculate average train time
    subparsers = parser.add_subparsers(dest='task', help='task parser')
    add_plot_parser(subparsers)
    add_time_parser(subparsers)
    return parser.parse_args()
def load_json_logs(json_logs):
    """Convert json training logs into per-epoch metric dicts.

    Each output dict maps an epoch number to a sub dict whose keys are
    metric names (e.g. memory, bbox_mAP) and whose values are the lists
    of that metric over all iterations of the epoch.

    Args:
        json_logs (list[str]): Paths of json log files, one entry per line.

    Returns:
        list[dict]: One dict per input log file, in the same order.
    """
    log_dicts = [dict() for _ in json_logs]
    for json_log, log_dict in zip(json_logs, log_dicts):
        with open(json_log, 'r') as log_file:
            for line in log_file:
                record = json.loads(line.strip())
                # skip lines without `epoch` field
                if 'epoch' not in record:
                    continue
                epoch = record.pop('epoch')
                if epoch not in log_dict:
                    log_dict[epoch] = defaultdict(list)
                for key, value in record.items():
                    log_dict[epoch][key].append(value)
    return log_dicts
def main():
    """Entry point: parse CLI args, load the logs and run the chosen task."""
    args = parse_args()

    json_logs = args.json_logs
    for json_log in json_logs:
        assert json_log.endswith('.json')

    log_dicts = load_json_logs(json_logs)

    # Dispatch through an explicit table instead of `eval(args.task)`:
    # eval() on a CLI string is an injection hazard and hides typos until
    # runtime. The subparser already restricts `task` to these two names.
    task_fns = {
        'cal_train_time': cal_train_time,
        'plot_curve': plot_curve,
    }
    task_fns[args.task](log_dicts, args)


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/analysis_tools/benchmark.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
import
time
import
torch
from
mmcv
import
Config
from
mmcv.parallel
import
MMDataParallel
from
mmcv.runner
import
load_checkpoint
,
wrap_fp16_model
from
mmdet3d.datasets
import
build_dataloader
,
build_dataset
from
mmdet3d.models
import
build_detector
from
tools.misc.fuse_conv_bn
import
fuse_module
def parse_args():
    """Parse benchmark CLI arguments.

    Fix: `--samples` and `--log-interval` previously had no `type=`, so a
    value passed on the command line arrived as `str` and silently broke
    the `(i + 1) == args.samples` and `% args.log_interval` checks in
    `main()` (only the int defaults worked). They are now parsed as int.
    """
    parser = argparse.ArgumentParser(description='MMDet benchmark a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--samples', type=int, default=2000, help='samples to benchmark')
    parser.add_argument(
        '--log-interval', type=int, default=50, help='interval of logging')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    args = parser.parse_args()
    return args
def main():
    """Benchmark a detector's pure inference speed on the test split.

    Fix: the final-sample branch previously executed
    `pure_inf_time += elapsed` a second time, double-counting the last
    iteration and understating the reported overall fps. The elapsed time
    is now accumulated exactly once, in the warm-up-aware branch.
    """
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)

    # build the model and load checkpoint
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)

    model = MMDataParallel(model, device_ids=[0])

    model.eval()

    # the first several iterations may be very slow so skip them
    num_warmup = 5
    pure_inf_time = 0

    # benchmark with several samples and take the average
    for i, data in enumerate(data_loader):

        torch.cuda.synchronize()
        start_time = time.perf_counter()

        with torch.no_grad():
            model(return_loss=False, rescale=True, **data)

        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time

        if i >= num_warmup:
            # Accumulate exactly once per post-warm-up iteration.
            pure_inf_time += elapsed
            if (i + 1) % args.log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(f'Done image [{i + 1:<3}/ {args.samples}], '
                      f'fps: {fps:.1f} img / s')

        if (i + 1) == args.samples:
            fps = (i + 1 - num_warmup) / pure_inf_time
            print(f'Overall fps: {fps:.1f} img / s')
            break


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/analysis_tools/get_flops.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
import
torch
from
mmcv
import
Config
,
DictAction
from
mmdet3d.models
import
build_model
try
:
from
mmcv.cnn
import
get_model_complexity_info
except
ImportError
:
raise
ImportError
(
'Please upgrade mmcv to >0.6.2'
)
def parse_args():
    """Parse CLI arguments for the FLOPs counting tool."""
    parser = argparse.ArgumentParser(description='Train a detector')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[40000, 4],
        help='input point cloud size')
    parser.add_argument(
        '--modality',
        type=str,
        default='point',
        choices=['point', 'image', 'multi'],
        help='input data modality')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    return parser.parse_args()
def main():
    """Compute and print FLOPs/params of a model for a given input shape."""
    args = parse_args()

    # Resolve the dummy input shape from the chosen modality.
    if args.modality == 'point':
        assert len(args.shape) == 2, 'invalid input shape'
        input_shape = tuple(args.shape)
    elif args.modality == 'image':
        if len(args.shape) == 1:
            # Square image: one side length given.
            input_shape = (3, args.shape[0], args.shape[0])
        elif len(args.shape) == 2:
            input_shape = (3, ) + tuple(args.shape)
        else:
            raise ValueError('invalid input shape')
    elif args.modality == 'multi':
        raise NotImplementedError(
            'FLOPs counter is currently not supported for models with '
            'multi-modality input')

    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)

    model = build_model(
        cfg.model,
        train_cfg=cfg.get('train_cfg'),
        test_cfg=cfg.get('test_cfg'))
    if torch.cuda.is_available():
        model.cuda()
    model.eval()

    # The complexity hook needs a loss-free forward; reuse forward_dummy.
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        raise NotImplementedError(
            'FLOPs counter is currently not supported for {}'.format(
                model.__class__.__name__))

    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30
    print(f'{split_line}\nInput shape: {input_shape}\n'
          f'Flops: {flops}\nParams: {params}\n{split_line}')
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/create_data.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
from
os
import
path
as
osp
from
tools.data_converter
import
indoor_converter
as
indoor
from
tools.data_converter
import
kitti_converter
as
kitti
from
tools.data_converter
import
lyft_converter
as
lyft_converter
from
tools.data_converter
import
nuscenes_converter
as
nuscenes_converter
from
tools.data_converter.create_gt_database
import
(
GTDatabaseCreater
,
create_groundtruth_database
)
def kitti_data_prep(root_path, info_prefix, version, out_dir, with_plane=False):
    """Prepare data related to Kitti dataset.

    Related data consists of '.pkl' files recording basic infos,
    2D annotations and groundtruth database.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version.
        out_dir (str): Output directory of the groundtruth database info.
        with_plane (bool, optional): Whether to use plane information.
            Default: False.
    """
    kitti.create_kitti_info_file(root_path, info_prefix, with_plane)
    kitti.create_reduced_point_cloud(root_path, info_prefix)

    # Export 2D boxes for every split's info file.
    split_info_paths = [
        osp.join(root_path, f'{info_prefix}_infos_train.pkl'),
        osp.join(root_path, f'{info_prefix}_infos_val.pkl'),
        osp.join(root_path, f'{info_prefix}_infos_trainval.pkl'),
        osp.join(root_path, f'{info_prefix}_infos_test.pkl'),
    ]
    for info_path in split_info_paths:
        kitti.export_2d_annotation(root_path, info_path)

    create_groundtruth_database(
        'KittiDataset',
        root_path,
        info_prefix,
        f'{out_dir}/{info_prefix}_infos_train.pkl',
        relative_path=False,
        mask_anno_path='instances_train.json',
        with_mask=(version == 'mask'))
def nuscenes_data_prep(root_path,
                       info_prefix,
                       version,
                       dataset_name,
                       out_dir,
                       max_sweeps=10):
    """Prepare data related to nuScenes dataset.

    Related data consists of '.pkl' files recording basic infos,
    2D annotations and groundtruth database.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version.
        dataset_name (str): The dataset class name.
        out_dir (str): Output directory of the groundtruth database info.
        max_sweeps (int, optional): Number of input consecutive frames.
            Default: 10
    """
    nuscenes_converter.create_nuscenes_infos(
        root_path, info_prefix, version=version, max_sweeps=max_sweeps)

    if version == 'v1.0-test':
        # Test split has no annotations, so no GT database is built.
        info_test_path = osp.join(root_path, f'{info_prefix}_infos_test.pkl')
        nuscenes_converter.export_2d_annotation(
            root_path, info_test_path, version=version)
        return

    info_train_path = osp.join(root_path, f'{info_prefix}_infos_train.pkl')
    info_val_path = osp.join(root_path, f'{info_prefix}_infos_val.pkl')
    nuscenes_converter.export_2d_annotation(
        root_path, info_train_path, version=version)
    nuscenes_converter.export_2d_annotation(
        root_path, info_val_path, version=version)
    create_groundtruth_database(dataset_name, root_path, info_prefix,
                                f'{out_dir}/{info_prefix}_infos_train.pkl')
def lyft_data_prep(root_path, info_prefix, version, max_sweeps=10):
    """Prepare data related to Lyft dataset.

    Related data consists of '.pkl' files recording basic infos.
    Although the ground truth database and 2D annotations are not used in
    Lyft, it can also be generated like nuScenes.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version.
        max_sweeps (int, optional): Number of input consecutive frames.
            Defaults to 10.
    """
    lyft_converter.create_lyft_infos(
        root_path, info_prefix, version=version, max_sweeps=max_sweeps)
def scannet_data_prep(root_path, info_prefix, out_dir, workers):
    """Prepare the info file for scannet dataset.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        out_dir (str): Output directory of the generated info file.
        workers (int): Number of threads to be used.
    """
    indoor.create_indoor_info_file(
        root_path, info_prefix, out_dir, workers=workers)
def s3dis_data_prep(root_path, info_prefix, out_dir, workers):
    """Prepare the info file for s3dis dataset.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        out_dir (str): Output directory of the generated info file.
        workers (int): Number of threads to be used.
    """
    indoor.create_indoor_info_file(
        root_path, info_prefix, out_dir, workers=workers)
def sunrgbd_data_prep(root_path, info_prefix, out_dir, workers, num_points):
    """Prepare the info file for sunrgbd dataset.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        out_dir (str): Output directory of the generated info file.
        workers (int): Number of threads to be used.
        num_points (int): Number of points to sample per scene.
    """
    indoor.create_indoor_info_file(
        root_path,
        info_prefix,
        out_dir,
        workers=workers,
        num_points=num_points)
def waymo_data_prep(root_path,
                    info_prefix,
                    version,
                    out_dir,
                    workers,
                    max_sweeps=5):
    """Prepare the info file for waymo dataset.

    Args:
        root_path (str): Path of dataset root.
        info_prefix (str): The prefix of info filenames.
        version (str): Dataset version (kept for a uniform prep signature).
        out_dir (str): Output directory of the generated info file.
        workers (int): Number of threads to be used.
        max_sweeps (int, optional): Number of input consecutive frames.
            Default: 5. Here we store pose information of these frames
            for later use.
    """
    # Imported lazily: the waymo converter pulls in TF dependencies that
    # other dataset preps do not need.
    from tools.data_converter import waymo_converter as waymo

    splits = ['training', 'validation', 'testing']
    for i, split in enumerate(splits):
        load_dir = osp.join(root_path, 'waymo_format', split)
        # Validation frames are merged into the kitti-format 'training'
        # folder; the prefix keeps their ids disjoint per split.
        if split == 'validation':
            save_dir = osp.join(out_dir, 'kitti_format', 'training')
        else:
            save_dir = osp.join(out_dir, 'kitti_format', split)
        converter = waymo.Waymo2KITTI(
            load_dir,
            save_dir,
            prefix=str(i),
            workers=workers,
            test_mode=(split == 'testing'))
        converter.convert()

    # Generate waymo infos
    out_dir = osp.join(out_dir, 'kitti_format')
    kitti.create_waymo_info_file(
        out_dir, info_prefix, max_sweeps=max_sweeps, workers=workers)
    GTDatabaseCreater(
        'WaymoDataset',
        out_dir,
        info_prefix,
        f'{out_dir}/{info_prefix}_infos_train.pkl',
        relative_path=False,
        with_mask=False,
        num_worker=workers).create()
parser = argparse.ArgumentParser(description='Data converter arg parser')
parser.add_argument('dataset', metavar='kitti', help='name of the dataset')
parser.add_argument(
    '--root-path',
    type=str,
    default='./data/kitti',
    help='specify the root path of dataset')
parser.add_argument(
    '--version',
    type=str,
    default='v1.0',
    required=False,
    help='specify the dataset version, no need for kitti')
parser.add_argument(
    '--max-sweeps',
    type=int,
    default=10,
    required=False,
    help='specify sweeps of lidar per example')
parser.add_argument(
    '--with-plane',
    action='store_true',
    help='Whether to use plane information for kitti.')
parser.add_argument(
    '--num-points',
    type=int,
    default=-1,
    help='Number of points to sample for indoor datasets.')
parser.add_argument(
    '--out-dir',
    type=str,
    default='./data/kitti',
    required=False,
    help='name of info pkl')
parser.add_argument('--extra-tag', type=str, default='kitti')
parser.add_argument(
    '--workers', type=int, default=4, help='number of threads to be used')

if __name__ == '__main__':
    # Fix: parse_args() used to run at module level, so merely importing
    # this module consumed sys.argv (and could exit). It now runs only
    # when the script is executed directly.
    args = parser.parse_args()

    if args.dataset == 'kitti':
        kitti_data_prep(
            root_path=args.root_path,
            info_prefix=args.extra_tag,
            version=args.version,
            out_dir=args.out_dir,
            with_plane=args.with_plane)
    elif args.dataset == 'nuscenes' and args.version != 'v1.0-mini':
        train_version = f'{args.version}-trainval'
        nuscenes_data_prep(
            root_path=args.root_path,
            info_prefix=args.extra_tag,
            version=train_version,
            dataset_name='NuScenesDataset',
            out_dir=args.out_dir,
            max_sweeps=args.max_sweeps)
        test_version = f'{args.version}-test'
        nuscenes_data_prep(
            root_path=args.root_path,
            info_prefix=args.extra_tag,
            version=test_version,
            dataset_name='NuScenesDataset',
            out_dir=args.out_dir,
            max_sweeps=args.max_sweeps)
    elif args.dataset == 'nuscenes' and args.version == 'v1.0-mini':
        train_version = f'{args.version}'
        nuscenes_data_prep(
            root_path=args.root_path,
            info_prefix=args.extra_tag,
            version=train_version,
            dataset_name='NuScenesDataset',
            out_dir=args.out_dir,
            max_sweeps=args.max_sweeps)
    elif args.dataset == 'lyft':
        train_version = f'{args.version}-train'
        lyft_data_prep(
            root_path=args.root_path,
            info_prefix=args.extra_tag,
            version=train_version,
            max_sweeps=args.max_sweeps)
        test_version = f'{args.version}-test'
        lyft_data_prep(
            root_path=args.root_path,
            info_prefix=args.extra_tag,
            version=test_version,
            max_sweeps=args.max_sweeps)
    elif args.dataset == 'waymo':
        waymo_data_prep(
            root_path=args.root_path,
            info_prefix=args.extra_tag,
            version=args.version,
            out_dir=args.out_dir,
            workers=args.workers,
            max_sweeps=args.max_sweeps)
    elif args.dataset == 'scannet':
        scannet_data_prep(
            root_path=args.root_path,
            info_prefix=args.extra_tag,
            out_dir=args.out_dir,
            workers=args.workers)
    elif args.dataset == 's3dis':
        s3dis_data_prep(
            root_path=args.root_path,
            info_prefix=args.extra_tag,
            out_dir=args.out_dir,
            workers=args.workers)
    elif args.dataset == 'sunrgbd':
        sunrgbd_data_prep(
            root_path=args.root_path,
            info_prefix=args.extra_tag,
            num_points=args.num_points,
            out_dir=args.out_dir,
            workers=args.workers)
autonomous_driving/openlane-v2/tools/create_data.sh
0 → 100644
View file @
305e110f
#!/usr/bin/env bash
# Submit tools/create_data.py as a Slurm job.
# Usage: GPUS=1 ./create_data.sh <partition> <job_name> <dataset>
set -x

export PYTHONPATH=`pwd`:$PYTHONPATH

PARTITION=$1
JOB_NAME=$2
DATASET=$3
GPUS=${GPUS:-1}
GPUS_PER_NODE=${GPUS_PER_NODE:-1}
SRUN_ARGS=${SRUN_ARGS:-""}
# NOTE(review): this overrides the $2 job name above, making that
# argument dead — kept as-is to preserve behavior; confirm intent.
JOB_NAME=create_data

srun -p ${PARTITION} \
    --job-name=${JOB_NAME} \
    --gres=gpu:${GPUS_PER_NODE} \
    --ntasks=${GPUS} \
    --ntasks-per-node=${GPUS_PER_NODE} \
    --kill-on-bad-exit=1 \
    ${SRUN_ARGS} \
    python -u tools/create_data.py ${DATASET} \
    --root-path ./data/${DATASET} \
    --out-dir ./data/${DATASET} \
    --extra-tag ${DATASET}
autonomous_driving/openlane-v2/tools/data_converter/__init__.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
autonomous_driving/openlane-v2/tools/data_converter/create_gt_database.py
0 → 100644
View file @
305e110f
This diff is collapsed.
Click to expand it.
autonomous_driving/openlane-v2/tools/data_converter/indoor_converter.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
os
import
mmcv
import
numpy
as
np
from
tools.data_converter.s3dis_data_utils
import
S3DISData
,
S3DISSegData
from
tools.data_converter.scannet_data_utils
import
ScanNetData
,
ScanNetSegData
from
tools.data_converter.sunrgbd_data_utils
import
SUNRGBDData
def create_indoor_info_file(data_path,
                            pkl_prefix='sunrgbd',
                            save_path=None,
                            workers=4,
                            **kwargs):
    """Create indoor information file.

    Get information of the raw data and save it to the pkl file.

    Args:
        data_path (str): Path of the data.
        pkl_prefix (str, optional): Prefix of the pkl to be saved.
            Default: 'sunrgbd'.
        save_path (str, optional): Path of the pkl to be saved. Default: None.
        workers (int, optional): Number of threads to be used. Default: 4.
        kwargs (dict): Additional parameters for dataset-specific Data class.
            May include `use_v1` for SUN RGB-D and `num_points`.
    """
    assert os.path.exists(data_path)
    assert pkl_prefix in ['sunrgbd', 'scannet', 's3dis'], \
        f'unsupported indoor dataset {pkl_prefix}'
    save_path = data_path if save_path is None else save_path
    assert os.path.exists(save_path)

    # generate infos for both detection and segmentation task
    if pkl_prefix in ['sunrgbd', 'scannet']:
        train_filename = os.path.join(save_path,
                                      f'{pkl_prefix}_infos_train.pkl')
        val_filename = os.path.join(save_path, f'{pkl_prefix}_infos_val.pkl')
        if pkl_prefix == 'sunrgbd':
            # SUN RGB-D has a train-val split
            num_points = kwargs.get('num_points', -1)
            use_v1 = kwargs.get('use_v1', False)
            train_dataset = SUNRGBDData(
                root_path=data_path,
                split='train',
                use_v1=use_v1,
                num_points=num_points)
            val_dataset = SUNRGBDData(
                root_path=data_path,
                split='val',
                use_v1=use_v1,
                num_points=num_points)
        else:
            # ScanNet has a train-val-test split
            train_dataset = ScanNetData(root_path=data_path, split='train')
            val_dataset = ScanNetData(root_path=data_path, split='val')
            test_dataset = ScanNetData(root_path=data_path, split='test')
            test_filename = os.path.join(save_path,
                                         f'{pkl_prefix}_infos_test.pkl')
        infos_train = train_dataset.get_infos(
            num_workers=workers, has_label=True)
        mmcv.dump(infos_train, train_filename, 'pkl')
        print(f'{pkl_prefix} info train file is saved to {train_filename}')
        infos_val = val_dataset.get_infos(num_workers=workers, has_label=True)
        mmcv.dump(infos_val, val_filename, 'pkl')
        print(f'{pkl_prefix} info val file is saved to {val_filename}')
        if pkl_prefix == 'scannet':
            # Test split carries no labels.
            infos_test = test_dataset.get_infos(
                num_workers=workers, has_label=False)
            mmcv.dump(infos_test, test_filename, 'pkl')
            print(f'{pkl_prefix} info test file is saved to {test_filename}')

    # generate infos for the semantic segmentation task
    # e.g. re-sampled scene indexes and label weights
    # scene indexes are used to re-sample rooms with different number of points
    # label weights are used to balance classes with different number of points
    if pkl_prefix == 'scannet':
        # label weight computation function is adopted from
        # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24
        num_points = kwargs.get('num_points', 8192)
        train_dataset = ScanNetSegData(
            data_root=data_path,
            ann_file=train_filename,
            split='train',
            num_points=num_points,
            label_weight_func=lambda x: 1.0 / np.log(1.2 + x))
        # TODO: do we need to generate on val set?
        val_dataset = ScanNetSegData(
            data_root=data_path,
            ann_file=val_filename,
            split='val',
            num_points=num_points,
            label_weight_func=lambda x: 1.0 / np.log(1.2 + x))
        # no need to generate for test set
        train_dataset.get_seg_infos()
        val_dataset.get_seg_infos()
    elif pkl_prefix == 's3dis':
        # S3DIS doesn't have a fixed train-val split
        # it has 6 areas instead, so we generate info file for each of them
        # in training, we will use dataset to wrap different areas
        splits = [f'Area_{i}' for i in [1, 2, 3, 4, 5, 6]]
        for split in splits:
            dataset = S3DISData(root_path=data_path, split=split)
            info = dataset.get_infos(num_workers=workers, has_label=True)
            filename = os.path.join(save_path,
                                    f'{pkl_prefix}_infos_{split}.pkl')
            mmcv.dump(info, filename, 'pkl')
            print(f'{pkl_prefix} info {split} file is saved to {filename}')
            num_points = kwargs.get('num_points', 4096)
            seg_dataset = S3DISSegData(
                data_root=data_path,
                ann_file=filename,
                split=split,
                num_points=num_points,
                label_weight_func=lambda x: 1.0 / np.log(1.2 + x))
            seg_dataset.get_seg_infos()
autonomous_driving/openlane-v2/tools/data_converter/kitti_converter.py
0 → 100644
View file @
305e110f
This diff is collapsed.
Click to expand it.
autonomous_driving/openlane-v2/tools/data_converter/kitti_data_utils.py
0 → 100644
View file @
305e110f
This diff is collapsed.
Click to expand it.
autonomous_driving/openlane-v2/tools/data_converter/lyft_converter.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
os
from
logging
import
warning
from
os
import
path
as
osp
import
mmcv
import
numpy
as
np
from
lyft_dataset_sdk.lyftdataset
import
LyftDataset
as
Lyft
from
pyquaternion
import
Quaternion
from
mmdet3d.datasets
import
LyftDataset
from
.nuscenes_converter
import
(
get_2d_boxes
,
get_available_scenes
,
obtain_sensor2top
)
# Object category names defined by the Lyft Level-5 detection dataset.
lyft_categories = ('car', 'truck', 'bus', 'emergency_vehicle', 'other_vehicle',
                   'motorcycle', 'bicycle', 'pedestrian', 'animal')
def create_lyft_infos(root_path,
                      info_prefix,
                      version='v1.01-train',
                      max_sweeps=10):
    """Create info file of lyft dataset.

    Given the raw data, generate its related info file in pkl format.

    Args:
        root_path (str): Path of the data root.
        info_prefix (str): Prefix of the info file to be generated.
        version (str, optional): Version of the data.
            Default: 'v1.01-train'.
        max_sweeps (int, optional): Max number of sweeps.
            Default: 10.
    """
    # The Lyft devkit keeps the json tables in a sub-folder named after the
    # version, hence `version` appears twice in `json_path`.
    lyft = Lyft(
        data_path=osp.join(root_path, version),
        json_path=osp.join(root_path, version, version),
        verbose=True)
    available_vers = ['v1.01-train', 'v1.01-test']
    assert version in available_vers
    # Scene name lists for the splits are read from fixed txt files under
    # `data/lyft/` (relative to the working directory, not `root_path`).
    if version == 'v1.01-train':
        train_scenes = mmcv.list_from_file('data/lyft/train.txt')
        val_scenes = mmcv.list_from_file('data/lyft/val.txt')
    elif version == 'v1.01-test':
        train_scenes = mmcv.list_from_file('data/lyft/test.txt')
        val_scenes = []
    else:
        # Unreachable given the assert above; kept as a defensive guard.
        raise ValueError('unknown')
    # filter existing scenes.
    available_scenes = get_available_scenes(lyft)
    available_scene_names = [s['name'] for s in available_scenes]
    train_scenes = list(
        filter(lambda x: x in available_scene_names, train_scenes))
    val_scenes = list(
        filter(lambda x: x in available_scene_names, val_scenes))
    # Map the surviving scene names to their scene tokens.
    train_scenes = set([
        available_scenes[available_scene_names.index(s)]['token']
        for s in train_scenes
    ])
    val_scenes = set([
        available_scenes[available_scene_names.index(s)]['token']
        for s in val_scenes
    ])

    # In test mode the "train" list actually holds the test scenes.
    test = 'test' in version
    if test:
        print(f'test scene: {len(train_scenes)}')
    else:
        print(f'train scene: {len(train_scenes)}, \
              val scene: {len(val_scenes)}')
    train_lyft_infos, val_lyft_infos = _fill_trainval_infos(
        lyft, train_scenes, val_scenes, test, max_sweeps=max_sweeps)

    metadata = dict(version=version)
    if test:
        print(f'test sample: {len(train_lyft_infos)}')
        data = dict(infos=train_lyft_infos, metadata=metadata)
        info_name = f'{info_prefix}_infos_test'
        info_path = osp.join(root_path, f'{info_name}.pkl')
        mmcv.dump(data, info_path)
    else:
        print(f'train sample: {len(train_lyft_infos)}, \
              val sample: {len(val_lyft_infos)}')
        data = dict(infos=train_lyft_infos, metadata=metadata)
        train_info_name = f'{info_prefix}_infos_train'
        info_path = osp.join(root_path, f'{train_info_name}.pkl')
        mmcv.dump(data, info_path)
        # Reuse the same dict for the val dump, swapping only the infos.
        data['infos'] = val_lyft_infos
        val_info_name = f'{info_prefix}_infos_val'
        info_val_path = osp.join(root_path, f'{val_info_name}.pkl')
        mmcv.dump(data, info_val_path)
def _fill_trainval_infos(lyft,
                         train_scenes,
                         val_scenes,
                         test=False,
                         max_sweeps=10):
    """Generate the train/val infos from the raw data.

    Args:
        lyft (:obj:`LyftDataset`): Dataset class in the Lyft dataset.
        train_scenes (list[str]): Basic information of training scenes.
        val_scenes (list[str]): Basic information of validation scenes.
        test (bool, optional): Whether use the test mode. In the test mode, no
            annotations can be accessed. Default: False.
        max_sweeps (int, optional): Max number of sweeps. Default: 10.

    Returns:
        tuple[list[dict]]: Information of training set and
            validation set that will be saved to the info file.
    """
    train_lyft_infos = []
    val_lyft_infos = []

    for sample in mmcv.track_iter_progress(lyft.sample):
        lidar_token = sample['data']['LIDAR_TOP']
        sd_rec = lyft.get('sample_data', sample['data']['LIDAR_TOP'])
        cs_record = lyft.get('calibrated_sensor',
                             sd_rec['calibrated_sensor_token'])
        pose_record = lyft.get('ego_pose', sd_rec['ego_pose_token'])
        abs_lidar_path, boxes, _ = lyft.get_sample_data(lidar_token)
        # nuScenes devkit returns more convenient relative paths while
        # lyft devkit returns absolute paths
        abs_lidar_path = str(abs_lidar_path)  # absolute path
        # Strip the current working directory prefix to recover a
        # relative path (assumes the data lives under the cwd).
        lidar_path = abs_lidar_path.split(f'{os.getcwd()}/')[-1]
        # relative path

        mmcv.check_file_exist(lidar_path)

        info = {
            'lidar_path': lidar_path,
            'token': sample['token'],
            'sweeps': [],
            'cams': dict(),
            'lidar2ego_translation': cs_record['translation'],
            'lidar2ego_rotation': cs_record['rotation'],
            'ego2global_translation': pose_record['translation'],
            'ego2global_rotation': pose_record['rotation'],
            'timestamp': sample['timestamp'],
        }

        l2e_r = info['lidar2ego_rotation']
        l2e_t = info['lidar2ego_translation']
        e2g_r = info['ego2global_rotation']
        e2g_t = info['ego2global_translation']
        l2e_r_mat = Quaternion(l2e_r).rotation_matrix
        e2g_r_mat = Quaternion(e2g_r).rotation_matrix

        # obtain 6 image's information per frame
        camera_types = [
            'CAM_FRONT',
            'CAM_FRONT_RIGHT',
            'CAM_FRONT_LEFT',
            'CAM_BACK',
            'CAM_BACK_LEFT',
            'CAM_BACK_RIGHT',
        ]
        for cam in camera_types:
            cam_token = sample['data'][cam]
            cam_path, _, cam_intrinsic = lyft.get_sample_data(cam_token)
            cam_info = obtain_sensor2top(lyft, cam_token, l2e_t, l2e_r_mat,
                                         e2g_t, e2g_r_mat, cam)
            cam_info.update(cam_intrinsic=cam_intrinsic)
            info['cams'].update({cam: cam_info})

        # obtain sweeps for a single key-frame
        sd_rec = lyft.get('sample_data', sample['data']['LIDAR_TOP'])
        sweeps = []
        # Walk the `prev` chain of lidar sample_data records, collecting up
        # to `max_sweeps` preceding (non-keyframe) sweeps.
        while len(sweeps) < max_sweeps:
            if not sd_rec['prev'] == '':
                sweep = obtain_sensor2top(lyft, sd_rec['prev'], l2e_t,
                                          l2e_r_mat, e2g_t, e2g_r_mat,
                                          'lidar')
                sweeps.append(sweep)
                sd_rec = lyft.get('sample_data', sd_rec['prev'])
            else:
                break
        info['sweeps'] = sweeps
        # obtain annotation
        if not test:
            annotations = [
                lyft.get('sample_annotation', token)
                for token in sample['anns']
            ]
            locs = np.array([b.center for b in boxes]).reshape(-1, 3)
            dims = np.array([b.wlh for b in boxes]).reshape(-1, 3)
            rots = np.array([b.orientation.yaw_pitch_roll[0]
                             for b in boxes]).reshape(-1, 1)

            names = [b.name for b in boxes]
            # Map raw Lyft category names to the dataset's canonical names.
            for i in range(len(names)):
                if names[i] in LyftDataset.NameMapping:
                    names[i] = LyftDataset.NameMapping[names[i]]
            names = np.array(names)

            # we need to convert box size to
            # the format of our lidar coordinate system
            # which is x_size, y_size, z_size (corresponding to l, w, h)
            gt_boxes = np.concatenate([locs, dims[:, [1, 0, 2]], rots],
                                      axis=1)
            assert len(gt_boxes) == len(
                annotations), f'{len(gt_boxes)}, {len(annotations)}'
            info['gt_boxes'] = gt_boxes
            info['gt_names'] = names
            info['num_lidar_pts'] = np.array(
                [a['num_lidar_pts'] for a in annotations])
            info['num_radar_pts'] = np.array(
                [a['num_radar_pts'] for a in annotations])

        # Samples from scenes not in `train_scenes` fall through to the val
        # list (in test mode `val_scenes` is empty, so everything is "train").
        if sample['scene_token'] in train_scenes:
            train_lyft_infos.append(info)
        else:
            val_lyft_infos.append(info)

    return train_lyft_infos, val_lyft_infos
def export_2d_annotation(root_path, info_path, version):
    """Export 2d annotation from the info file and raw data.

    Writes a COCO-format json next to ``info_path`` (suffix ``.coco.json``).

    Args:
        root_path (str): Root path of the raw data.
        info_path (str): Path of the info file.
        version (str): Dataset version.
    """
    # BUGFIX: this file imports `from logging import warning`, so `warning`
    # is the `logging.warning` *function*, not a module. The previous call
    # `warning.warn(...)` raised AttributeError before any work was done;
    # calling the function directly emits the intended notice.
    warning('DeprecationWarning: 2D annotations are not used on the '
            'Lyft dataset. The function export_2d_annotation will be '
            'deprecated.')
    # get bbox annotations for camera
    camera_types = [
        'CAM_FRONT',
        'CAM_FRONT_RIGHT',
        'CAM_FRONT_LEFT',
        'CAM_BACK',
        'CAM_BACK_LEFT',
        'CAM_BACK_RIGHT',
    ]
    lyft_infos = mmcv.load(info_path)['infos']
    lyft = Lyft(
        data_path=osp.join(root_path, version),
        json_path=osp.join(root_path, version, version),
        verbose=True)
    # info_2d_list = []
    # Category ids follow the tuple order of `lyft_categories`.
    cat2Ids = [
        dict(id=lyft_categories.index(cat_name), name=cat_name)
        for cat_name in lyft_categories
    ]
    coco_ann_id = 0
    coco_2d_dict = dict(annotations=[], images=[], categories=cat2Ids)
    for info in mmcv.track_iter_progress(lyft_infos):
        for cam in camera_types:
            cam_info = info['cams'][cam]
            coco_infos = get_2d_boxes(
                lyft,
                cam_info['sample_data_token'],
                visibilities=['', '1', '2', '3', '4'])
            # Read the image only for its dimensions.
            (height, width, _) = mmcv.imread(cam_info['data_path']).shape
            coco_2d_dict['images'].append(
                dict(
                    file_name=cam_info['data_path'],
                    id=cam_info['sample_data_token'],
                    width=width,
                    height=height))
            for coco_info in coco_infos:
                if coco_info is None:
                    continue
                # add an empty key for coco format
                coco_info['segmentation'] = []
                coco_info['id'] = coco_ann_id
                coco_2d_dict['annotations'].append(coco_info)
                coco_ann_id += 1
    # `info_path[:-4]` strips the '.pkl' extension.
    mmcv.dump(coco_2d_dict, f'{info_path[:-4]}.coco.json')
autonomous_driving/openlane-v2/tools/data_converter/lyft_data_fixer.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
import
os
import
numpy
as
np
def fix_lyft(root_folder='./data/lyft', version='v1.01'):
    """Patch the one truncated lidar sweep in the Lyft training split.

    refer to https://www.kaggle.com/c/3d-object-detection-for-autonomous-vehicles/discussion/110000 # noqa
    """
    # Relative path of the known-broken sweep inside the train folder.
    broken_rel_path = 'lidar/host-a011_lidar1_1233090652702363606.bin'
    train_root = os.path.join(root_folder, f'{version}-train')
    lidar_path = os.path.join(train_root, broken_rel_path)
    assert os.path.isfile(lidar_path), (
        f'Please download the complete Lyft '
        f'dataset and make sure {lidar_path} is present.')
    raw = np.fromfile(lidar_path, dtype=np.float32, count=-1)
    # A healthy file holds 5 float32 values per point, so the buffer length
    # must be a multiple of 5 (equivalent to reshape([-1, 5]) succeeding).
    if raw.size % 5 == 0:
        print(f'This fix is not required for version {version}.')
    else:
        # Append the two values missing from the last point and rewrite.
        patched = np.array(list(raw) + [100.0, 1.0], dtype='float32')
        patched.tofile(lidar_path)
        print(f'Appended 100.0 and 1.0 to the end of {lidar_path}.')
# Command-line interface for the fixer script.
parser = argparse.ArgumentParser(description='Lyft dataset fixer arg parser')
parser.add_argument(
    '--root-folder',
    type=str,
    default='./data/lyft',
    help='specify the root path of Lyft dataset')
parser.add_argument(
    '--version',
    type=str,
    default='v1.01',
    help='specify Lyft dataset version')

if __name__ == '__main__':
    # BUGFIX: argv used to be parsed at import time, which made this module
    # impossible to import from other code or test runners (argparse exits
    # on unrecognized arguments). Parse only when run as a script.
    args = parser.parse_args()
    fix_lyft(root_folder=args.root_folder, version=args.version)
autonomous_driving/openlane-v2/tools/data_converter/nuimage_converter.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
argparse
import
base64
from
os
import
path
as
osp
import
mmcv
import
numpy
as
np
from
nuimages
import
NuImages
from
nuimages.utils.utils
import
mask_decode
,
name_to_index_mapping
# The ten nuImages detection classes; tuple order defines the COCO category
# id assigned in `export_nuim_to_coco` (via `nus_categories.index(...)`).
nus_categories = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
                  'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
                  'barrier')

# Mapping from raw nuImages category names to the detection class names
# above. Categories absent from this mapping are skipped when exporting
# object annotations in `get_img_annos`.
NAME_MAPPING = {
    'movable_object.barrier': 'barrier',
    'vehicle.bicycle': 'bicycle',
    'vehicle.bus.bendy': 'bus',
    'vehicle.bus.rigid': 'bus',
    'vehicle.car': 'car',
    'vehicle.construction': 'construction_vehicle',
    'vehicle.motorcycle': 'motorcycle',
    'human.pedestrian.adult': 'pedestrian',
    'human.pedestrian.child': 'pedestrian',
    'human.pedestrian.construction_worker': 'pedestrian',
    'human.pedestrian.police_officer': 'pedestrian',
    'movable_object.trafficcone': 'traffic_cone',
    'vehicle.trailer': 'trailer',
    'vehicle.truck': 'truck',
}
def parse_args():
    """Build the converter's command-line interface and parse ``sys.argv``."""
    cli = argparse.ArgumentParser(description='Data converter arg parser')
    cli.add_argument(
        '--data-root',
        type=str,
        default='./data/nuimages',
        help='specify the root path of dataset')
    cli.add_argument(
        '--version',
        type=str,
        nargs='+',
        default=['v1.0-mini'],
        required=False,
        help='specify the dataset version')
    cli.add_argument(
        '--out-dir',
        type=str,
        default='./data/nuimages/annotations/',
        required=False,
        help='path to save the exported json')
    cli.add_argument(
        '--nproc',
        type=int,
        default=4,
        required=False,
        help='workers to process semantic masks')
    cli.add_argument('--extra-tag', type=str, default='nuimages')
    return cli.parse_args()
def get_img_annos(nuim, img_info, cat2id, out_dir, data_root, seg_root):
    """Get semantic segmentation map for an image.

    Draws surface (stuff) and object masks into one semantic mask, writes it
    to ``seg_root`` as a png, and collects COCO-style object annotations.

    Args:
        nuim (obj:`NuImages`): NuImages dataset object
        img_info (dict): Meta information of img
        cat2id (dict): Mapping from detection class name to category id.
        out_dir (str): Unused here — kept for call compatibility.
        data_root (str): Unused here — kept for call compatibility.
        seg_root (str): Folder the semantic mask png is written to.

    Returns:
        tuple[list[dict], np.ndarray]: COCO-style annotation dicts for the
            mapped object instances, and the maximum class index present in
            the saved semantic mask.
    """
    sd_token = img_info['token']
    image_id = img_info['id']
    name_to_index = name_to_index_mapping(nuim.category)

    # Get image data.
    width, height = img_info['width'], img_info['height']
    semseg_mask = np.zeros((height, width)).astype('uint8')

    # Load stuff / surface regions.
    surface_anns = [
        o for o in nuim.surface_ann if o['sample_data_token'] == sd_token
    ]

    # Draw stuff / surface regions.
    for ann in surface_anns:
        # Get color and mask.
        category_token = ann['category_token']
        category_name = nuim.get('category', category_token)['name']
        if ann['mask'] is None:
            continue
        mask = mask_decode(ann['mask'])

        # Draw mask for semantic segmentation.
        semseg_mask[mask == 1] = name_to_index[category_name]

    # Load object instances.
    object_anns = [
        o for o in nuim.object_ann if o['sample_data_token'] == sd_token
    ]

    # Sort by token to ensure that objects always appear in the
    # instance mask in the same order.
    object_anns = sorted(object_anns, key=lambda k: k['token'])

    # Draw object instances.
    # The 0 index is reserved for background; thus, the instances
    # should start from index 1.
    annotations = []
    for i, ann in enumerate(object_anns, start=1):
        # Get color, box, mask and name.
        category_token = ann['category_token']
        category_name = nuim.get('category', category_token)['name']
        if ann['mask'] is None:
            continue
        mask = mask_decode(ann['mask'])

        # Draw masks for semantic segmentation and instance segmentation.
        semseg_mask[mask == 1] = name_to_index[category_name]

        # Only categories with a detection-class mapping become COCO
        # annotations; everything else only contributes to the mask.
        if category_name in NAME_MAPPING:
            cat_name = NAME_MAPPING[category_name]
            cat_id = cat2id[cat_name]

            x_min, y_min, x_max, y_max = ann['bbox']
            # encode calibrated instance mask
            mask_anno = dict()
            mask_anno['counts'] = base64.b64decode(
                ann['mask']['counts']).decode()
            mask_anno['size'] = ann['mask']['size']

            data_anno = dict(
                image_id=image_id,
                category_id=cat_id,
                bbox=[x_min, y_min, x_max - x_min, y_max - y_min],
                area=(x_max - x_min) * (y_max - y_min),
                segmentation=mask_anno,
                iscrowd=0)
            annotations.append(data_anno)

    # after process, save semantic masks
    img_filename = img_info['file_name']
    seg_filename = img_filename.replace('jpg', 'png')
    seg_filename = osp.join(seg_root, seg_filename)
    mmcv.imwrite(semseg_mask, seg_filename)
    return annotations, np.max(semseg_mask)
def export_nuim_to_coco(nuim, data_root, out_dir, extra_tag, version, nproc):
    """Export one nuImages version to a COCO-format json plus semantic masks.

    Args:
        nuim (obj:`NuImages`): NuImages dataset object.
        data_root (str): Root path of the raw data.
        out_dir (str): Output folder for the json and semantic masks.
        extra_tag (str): Prefix of the output json file name.
        version (str): Dataset version being exported.
        nproc (int): Number of worker processes for mask generation.
    """
    print('Process category information')
    categories = []
    categories = [
        dict(id=nus_categories.index(cat_name), name=cat_name)
        for cat_name in nus_categories
    ]
    cat2id = {k_v['name']: k_v['id'] for k_v in categories}

    images = []
    print('Process image meta information...')
    # Only key frames carry annotations; their list position becomes the
    # COCO image id.
    for sample_info in mmcv.track_iter_progress(nuim.sample_data):
        if sample_info['is_key_frame']:
            img_idx = len(images)
            images.append(
                dict(
                    id=img_idx,
                    token=sample_info['token'],
                    file_name=sample_info['filename'],
                    width=sample_info['width'],
                    height=sample_info['height']))

    seg_root = f'{out_dir}semantic_masks'
    mmcv.mkdir_or_exist(seg_root)
    # NOTE(review): purpose of the `calibrated` folder is not visible here —
    # presumably consumed by a later step; confirm before removing.
    mmcv.mkdir_or_exist(osp.join(data_root, 'calibrated'))

    # Declared global so the nested function is reachable by name at module
    # scope — presumably required so worker processes can pickle it for
    # `track_parallel_progress`; confirm before refactoring.
    global process_img_anno

    def process_img_anno(img_info):
        # Thin per-image wrapper binding the shared arguments.
        single_img_annos, max_cls_id = get_img_annos(nuim, img_info, cat2id,
                                                     out_dir, data_root,
                                                     seg_root)
        return single_img_annos, max_cls_id

    print('Process img annotations...')
    if nproc > 1:
        outputs = mmcv.track_parallel_progress(
            process_img_anno, images, nproc=nproc)
    else:
        outputs = []
        for img_info in mmcv.track_iter_progress(images):
            outputs.append(process_img_anno(img_info))

    # Determine the index of object annotation
    print('Process annotation information...')
    annotations = []
    max_cls_ids = []
    for single_img_annos, max_cls_id in outputs:
        max_cls_ids.append(max_cls_id)
        for img_anno in single_img_annos:
            # Annotation ids are assigned sequentially across all images.
            img_anno.update(id=len(annotations))
            annotations.append(img_anno)

    max_cls_id = max(max_cls_ids)
    print(f'Max ID of class in the semantic map: {max_cls_id}')

    coco_format_json = dict(
        images=images, annotations=annotations, categories=categories)

    mmcv.mkdir_or_exist(out_dir)
    out_file = osp.join(out_dir, f'{extra_tag}_{version}.json')
    print(f'Annotation dumped to {out_file}')
    mmcv.dump(coco_format_json, out_file)
def main():
    """Convert every requested nuImages version to COCO format."""
    opts = parse_args()
    for ver in opts.version:
        dataset = NuImages(
            dataroot=opts.data_root, version=ver, verbose=True, lazy=True)
        export_nuim_to_coco(dataset, opts.data_root, opts.out_dir,
                            opts.extra_tag, ver, opts.nproc)


if __name__ == '__main__':
    main()
autonomous_driving/openlane-v2/tools/data_converter/nuscenes_converter.py
0 → 100644
View file @
305e110f
This diff is collapsed.
Click to expand it.
autonomous_driving/openlane-v2/tools/data_converter/s3dis_data_utils.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
os
from
concurrent
import
futures
as
futures
from
os
import
path
as
osp
import
mmcv
import
numpy
as
np
class S3DISData(object):
    """S3DIS data.

    Generate s3dis infos for s3dis_converter.

    Args:
        root_path (str): Root path of the raw data.
        split (str, optional): Set split type of the data. Default: 'Area_1'.
    """

    def __init__(self, root_path, split='Area_1'):
        self.root_dir = root_path
        self.split = split
        self.data_dir = osp.join(root_path,
                                 'Stanford3dDataset_v1.2_Aligned_Version')

        # Following `GSDN <https://arxiv.org/abs/2006.12356>`_, use 5 furniture
        # classes for detection: table, chair, sofa, bookcase, board.
        self.cat_ids = np.array([7, 8, 9, 10, 11])
        self.cat_ids2class = {
            cat_id: i
            for i, cat_id in enumerate(list(self.cat_ids))
        }

        assert split in [
            'Area_1', 'Area_2', 'Area_3', 'Area_4', 'Area_5', 'Area_6'
        ]
        # Samples are the room sub-directories of the area folder, e.g.
        # `conferenceRoom_1`; plain files are filtered out.
        # BUGFIX: the original removed entries from `sample_id_list` while
        # iterating over the same list, which skips the element following
        # every removal; build a filtered list instead.
        self.sample_id_list = [
            sample_id
            for sample_id in os.listdir(osp.join(self.data_dir, split))
            if not os.path.isfile(osp.join(self.data_dir, split, sample_id))
        ]

    def __len__(self):
        """Return the number of sample rooms in this area."""
        return len(self.sample_id_list)

    def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):
        """Get data infos.

        This method gets information from the raw data.

        Args:
            num_workers (int, optional): Number of threads to be used.
                Default: 4.
            has_label (bool, optional): Whether the data has label.
                Default: True.
            sample_id_list (list[int], optional): Index list of the sample.
                Default: None.

        Returns:
            infos (list[dict]): Information of the raw data.
        """
        # NOTE(review): `has_label` is unused here — kept for signature
        # parity with the other converters in this directory.

        def process_single_scene(sample_idx):
            # Load the preprocessed per-room arrays, dump them as flat
            # binary files and record their relative paths plus boxes.
            print(f'{self.split} sample_idx: {sample_idx}')
            info = dict()
            pc_info = {
                'num_features': 6,
                'lidar_idx': f'{self.split}_{sample_idx}'
            }
            info['point_cloud'] = pc_info
            pts_filename = osp.join(self.root_dir, 's3dis_data',
                                    f'{self.split}_{sample_idx}_point.npy')
            pts_instance_mask_path = osp.join(
                self.root_dir, 's3dis_data',
                f'{self.split}_{sample_idx}_ins_label.npy')
            pts_semantic_mask_path = osp.join(
                self.root_dir, 's3dis_data',
                f'{self.split}_{sample_idx}_sem_label.npy')

            points = np.load(pts_filename).astype(np.float32)
            # BUGFIX: `np.int` was removed in NumPy 1.24; it aliased the
            # builtin `int` (int64 on common 64-bit platforms), so np.int64
            # preserves the previous on-disk format.
            pts_instance_mask = np.load(pts_instance_mask_path).astype(
                np.int64)
            pts_semantic_mask = np.load(pts_semantic_mask_path).astype(
                np.int64)

            mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))
            mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))
            mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask'))

            points.tofile(
                osp.join(self.root_dir, 'points',
                         f'{self.split}_{sample_idx}.bin'))
            pts_instance_mask.tofile(
                osp.join(self.root_dir, 'instance_mask',
                         f'{self.split}_{sample_idx}.bin'))
            pts_semantic_mask.tofile(
                osp.join(self.root_dir, 'semantic_mask',
                         f'{self.split}_{sample_idx}.bin'))

            info['pts_path'] = osp.join('points',
                                        f'{self.split}_{sample_idx}.bin')
            info['pts_instance_mask_path'] = osp.join(
                'instance_mask', f'{self.split}_{sample_idx}.bin')
            info['pts_semantic_mask_path'] = osp.join(
                'semantic_mask', f'{self.split}_{sample_idx}.bin')
            info['annos'] = self.get_bboxes(points, pts_instance_mask,
                                            pts_semantic_mask)

            return info

        sample_id_list = sample_id_list if sample_id_list is not None \
            else self.sample_id_list
        with futures.ThreadPoolExecutor(num_workers) as executor:
            infos = executor.map(process_single_scene, sample_id_list)
        return list(infos)

    def get_bboxes(self, points, pts_instance_mask, pts_semantic_mask):
        """Convert instance masks to axis-aligned bounding boxes.

        Args:
            points (np.array): Scene points of shape (n, 6).
            pts_instance_mask (np.ndarray): Instance labels of shape (n,).
            pts_semantic_mask (np.ndarray): Semantic labels of shape (n,).

        Returns:
            dict: A dict containing detection infos with following keys:

                - gt_boxes_upright_depth (np.ndarray): Bounding boxes
                    of shape (n, 6)
                - class (np.ndarray): Box labels of shape (n,)
                - gt_num (int): Number of boxes.
        """
        bboxes, labels = [], []
        # Instance ids start at 1; 0 is background.
        for i in range(1, pts_instance_mask.max() + 1):
            ids = pts_instance_mask == i
            # check if all instance points have same semantic label
            assert pts_semantic_mask[ids].min() == pts_semantic_mask[ids].max(
            )
            label = pts_semantic_mask[ids][0]
            # keep only furniture objects
            if label in self.cat_ids2class:
                labels.append(self.cat_ids2class[pts_semantic_mask[ids][0]])
                pts = points[:, :3][ids]
                # Axis-aligned box: center and extent of the instance points.
                min_pts = pts.min(axis=0)
                max_pts = pts.max(axis=0)
                locations = (min_pts + max_pts) / 2
                dimensions = max_pts - min_pts
                bboxes.append(np.concatenate((locations, dimensions)))
        annotation = dict()
        # follow ScanNet and SUN RGB-D keys
        annotation['gt_boxes_upright_depth'] = np.array(bboxes)
        annotation['class'] = np.array(labels)
        annotation['gt_num'] = len(labels)
        return annotation
class S3DISSegData(object):
    """S3DIS dataset used to generate infos for semantic segmentation task.

    Args:
        data_root (str): Root path of the raw data.
        ann_file (str): The generated scannet infos.
        split (str, optional): Set split type of the data. Default: 'train'.
        num_points (int, optional): Number of points in each data input.
            Default: 8192.
        label_weight_func (function, optional): Function to compute the
            label weight. Default: None.
    """

    def __init__(self,
                 data_root,
                 ann_file,
                 split='Area_1',
                 num_points=4096,
                 label_weight_func=None):
        self.data_root = data_root
        self.data_infos = mmcv.load(ann_file)
        self.split = split
        self.num_points = num_points

        self.all_ids = np.arange(13)  # all possible ids
        # used for seg task
        self.cat_ids = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
        self.ignore_index = len(self.cat_ids)

        # Lookup table mapping raw class id -> training label; unknown ids
        # map to `ignore_index`.
        # BUGFIX: `np.int` was removed in NumPy 1.24; it aliased the builtin
        # `int` (int64 on common 64-bit platforms), so np.int64 preserves
        # the previous behavior.
        self.cat_id2class = np.ones(
            (self.all_ids.shape[0], ), dtype=np.int64) * self.ignore_index
        for i, cat_id in enumerate(self.cat_ids):
            self.cat_id2class[cat_id] = i

        # label weighting function is taken from
        # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24
        self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \
            label_weight_func is None else label_weight_func

    def get_seg_infos(self):
        """Compute and save resampled scene indices and label weights."""
        scene_idxs, label_weight = self.get_scene_idxs_and_label_weight()
        save_folder = osp.join(self.data_root, 'seg_info')
        mmcv.mkdir_or_exist(save_folder)
        np.save(
            osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'),
            scene_idxs)
        np.save(
            osp.join(save_folder, f'{self.split}_label_weight.npy'),
            label_weight)
        print(f'{self.split} resampled scene index and label weight saved')

    def _convert_to_label(self, mask):
        """Convert class_id in loaded segmentation mask to label.

        Args:
            mask (str | np.ndarray): Path of a saved mask (``.npy`` or flat
                int64 binary) or the mask array itself.

        Returns:
            np.ndarray: Per-point training labels.
        """
        if isinstance(mask, str):
            if mask.endswith('npy'):
                mask = np.load(mask)
            else:
                mask = np.fromfile(mask, dtype=np.int64)
        label = self.cat_id2class[mask]
        return label

    def get_scene_idxs_and_label_weight(self):
        """Compute scene_idxs for data sampling and label weight for loss
        calculation.

        We sample more times for scenes with more points. Label_weight is
        inversely proportional to number of class points.
        """
        num_classes = len(self.cat_ids)
        num_point_all = []
        label_weight = np.zeros((num_classes + 1, ))  # ignore_index
        for data_info in self.data_infos:
            label = self._convert_to_label(
                osp.join(self.data_root, data_info['pts_semantic_mask_path']))
            num_point_all.append(label.shape[0])
            class_count, _ = np.histogram(label, range(num_classes + 2))
            label_weight += class_count

        # repeat scene_idx for num_scene_point // num_sample_point times
        sample_prob = np.array(num_point_all) / float(np.sum(num_point_all))
        num_iter = int(np.sum(num_point_all) / float(self.num_points))
        scene_idxs = []
        for idx in range(len(self.data_infos)):
            scene_idxs.extend([idx] * int(round(sample_prob[idx] * num_iter)))
        scene_idxs = np.array(scene_idxs).astype(np.int32)

        # calculate label weight, adopted from PointNet++
        label_weight = label_weight[:-1].astype(np.float32)
        label_weight = label_weight / label_weight.sum()
        label_weight = self.label_weight_func(label_weight).astype(np.float32)

        return scene_idxs, label_weight
autonomous_driving/openlane-v2/tools/data_converter/scannet_data_utils.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
os
from
concurrent
import
futures
as
futures
from
os
import
path
as
osp
import
mmcv
import
numpy
as
np
class ScanNetData(object):
    """ScanNet data.

    Generate scannet infos for scannet_converter.

    Args:
        root_path (str): Root path of the raw data.
        split (str, optional): Set split type of the data. Default: 'train'.
    """

    def __init__(self, root_path, split='train'):
        self.root_dir = root_path
        self.split = split
        self.split_dir = osp.join(root_path)
        # The 18 ScanNet benchmark detection classes, in label order.
        self.classes = [
            'cabinet', 'bed', 'chair', 'sofa', 'table', 'door', 'window',
            'bookshelf', 'picture', 'counter', 'desk', 'curtain',
            'refrigerator', 'showercurtrain', 'toilet', 'sink', 'bathtub',
            'garbagebin'
        ]
        self.cat2label = {cat: self.classes.index(cat) for cat in self.classes}
        self.label2cat = {self.cat2label[t]: t for t in self.cat2label}
        # NYU40 ids corresponding to `self.classes`, mapped to contiguous
        # class indices below.
        self.cat_ids = np.array(
            [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34, 36, 39])
        self.cat_ids2class = {
            nyu40id: i
            for i, nyu40id in enumerate(list(self.cat_ids))
        }
        assert split in ['train', 'val', 'test']
        split_file = osp.join(self.root_dir, 'meta_data',
                              f'scannetv2_{split}.txt')
        mmcv.check_file_exist(split_file)
        self.sample_id_list = mmcv.list_from_file(split_file)
        self.test_mode = (split == 'test')

    def __len__(self):
        """Return the number of samples in the split."""
        return len(self.sample_id_list)

    def get_aligned_box_label(self, idx):
        """Load the axis-aligned boxes saved for sample ``idx``."""
        box_file = osp.join(self.root_dir, 'scannet_instance_data',
                            f'{idx}_aligned_bbox.npy')
        mmcv.check_file_exist(box_file)
        return np.load(box_file)

    def get_unaligned_box_label(self, idx):
        """Load the unaligned boxes saved for sample ``idx``."""
        box_file = osp.join(self.root_dir, 'scannet_instance_data',
                            f'{idx}_unaligned_bbox.npy')
        mmcv.check_file_exist(box_file)
        return np.load(box_file)

    def get_axis_align_matrix(self, idx):
        """Load the axis-alignment matrix saved for sample ``idx``."""
        matrix_file = osp.join(self.root_dir, 'scannet_instance_data',
                               f'{idx}_axis_align_matrix.npy')
        mmcv.check_file_exist(matrix_file)
        return np.load(matrix_file)

    def get_images(self, idx):
        """Collect relative paths of the posed jpg images of sample ``idx``."""
        paths = []
        path = osp.join(self.root_dir, 'posed_images', idx)
        for file in sorted(os.listdir(path)):
            if file.endswith('.jpg'):
                paths.append(osp.join('posed_images', idx, file))
        return paths

    def get_extrinsics(self, idx):
        """Load every per-image extrinsic txt (excluding intrinsic.txt)."""
        extrinsics = []
        path = osp.join(self.root_dir, 'posed_images', idx)
        for file in sorted(os.listdir(path)):
            if file.endswith('.txt') and not file == 'intrinsic.txt':
                extrinsics.append(np.loadtxt(osp.join(path, file)))
        return extrinsics

    def get_intrinsics(self, idx):
        """Load the camera intrinsics of sample ``idx``."""
        matrix_file = osp.join(self.root_dir, 'posed_images', idx,
                               'intrinsic.txt')
        mmcv.check_file_exist(matrix_file)
        return np.loadtxt(matrix_file)

    def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):
        """Get data infos.

        This method gets information from the raw data.

        Args:
            num_workers (int, optional): Number of threads to be used.
                Default: 4.
            has_label (bool, optional): Whether the data has label.
                Default: True.
            sample_id_list (list[int], optional): Index list of the sample.
                Default: None.

        Returns:
            infos (list[dict]): Information of the raw data.
        """

        def process_single_scene(sample_idx):
            # Dump points (and, outside test mode, masks) to flat binary
            # files and gather paths, camera data and box annotations.
            print(f'{self.split} sample_idx: {sample_idx}')
            info = dict()
            pc_info = {'num_features': 6, 'lidar_idx': sample_idx}
            info['point_cloud'] = pc_info
            pts_filename = osp.join(self.root_dir, 'scannet_instance_data',
                                    f'{sample_idx}_vert.npy')
            points = np.load(pts_filename)
            mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))
            points.tofile(
                osp.join(self.root_dir, 'points', f'{sample_idx}.bin'))
            info['pts_path'] = osp.join('points', f'{sample_idx}.bin')

            # update with RGB image paths if exist
            if os.path.exists(osp.join(self.root_dir, 'posed_images')):
                info['intrinsics'] = self.get_intrinsics(sample_idx)
                all_extrinsics = self.get_extrinsics(sample_idx)
                all_img_paths = self.get_images(sample_idx)
                # some poses in ScanNet are invalid
                extrinsics, img_paths = [], []
                for extrinsic, img_path in zip(all_extrinsics,
                                               all_img_paths):
                    if np.all(np.isfinite(extrinsic)):
                        img_paths.append(img_path)
                        extrinsics.append(extrinsic)
                info['extrinsics'] = extrinsics
                info['img_paths'] = img_paths

            if not self.test_mode:
                pts_instance_mask_path = osp.join(
                    self.root_dir, 'scannet_instance_data',
                    f'{sample_idx}_ins_label.npy')
                pts_semantic_mask_path = osp.join(
                    self.root_dir, 'scannet_instance_data',
                    f'{sample_idx}_sem_label.npy')

                pts_instance_mask = np.load(pts_instance_mask_path).astype(
                    np.int64)
                pts_semantic_mask = np.load(pts_semantic_mask_path).astype(
                    np.int64)

                mmcv.mkdir_or_exist(osp.join(self.root_dir, 'instance_mask'))
                mmcv.mkdir_or_exist(osp.join(self.root_dir, 'semantic_mask'))

                pts_instance_mask.tofile(
                    osp.join(self.root_dir, 'instance_mask',
                             f'{sample_idx}.bin'))
                pts_semantic_mask.tofile(
                    osp.join(self.root_dir, 'semantic_mask',
                             f'{sample_idx}.bin'))

                info['pts_instance_mask_path'] = osp.join(
                    'instance_mask', f'{sample_idx}.bin')
                info['pts_semantic_mask_path'] = osp.join(
                    'semantic_mask', f'{sample_idx}.bin')

            if has_label:
                annotations = {}
                # box is of shape [k, 6 + class]
                aligned_box_label = self.get_aligned_box_label(sample_idx)
                unaligned_box_label = self.get_unaligned_box_label(sample_idx)
                annotations['gt_num'] = aligned_box_label.shape[0]
                if annotations['gt_num'] != 0:
                    aligned_box = aligned_box_label[:, :-1]  # k, 6
                    unaligned_box = unaligned_box_label[:, :-1]
                    classes = aligned_box_label[:, -1]  # k
                    annotations['name'] = np.array([
                        self.label2cat[self.cat_ids2class[classes[i]]]
                        for i in range(annotations['gt_num'])
                    ])
                    # default names are given to aligned bbox for compatibility
                    # we also save unaligned bbox info with marked names
                    annotations['location'] = aligned_box[:, :3]
                    annotations['dimensions'] = aligned_box[:, 3:6]
                    annotations['gt_boxes_upright_depth'] = aligned_box
                    annotations['unaligned_location'] = unaligned_box[:, :3]
                    annotations['unaligned_dimensions'] = unaligned_box[:, 3:6]
                    annotations[
                        'unaligned_gt_boxes_upright_depth'] = unaligned_box
                    annotations['index'] = np.arange(
                        annotations['gt_num'], dtype=np.int32)
                    annotations['class'] = np.array([
                        self.cat_ids2class[classes[i]]
                        for i in range(annotations['gt_num'])
                    ])
                axis_align_matrix = self.get_axis_align_matrix(sample_idx)
                annotations['axis_align_matrix'] = axis_align_matrix  # 4x4
                info['annos'] = annotations
            return info

        sample_id_list = sample_id_list if sample_id_list is not None \
            else self.sample_id_list
        with futures.ThreadPoolExecutor(num_workers) as executor:
            infos = executor.map(process_single_scene, sample_id_list)
        return list(infos)
class ScanNetSegData(object):
    """ScanNet dataset used to generate infos for semantic segmentation task.

    Args:
        data_root (str): Root path of the raw data.
        ann_file (str): The generated scannet infos.
        split (str, optional): Set split type of the data. Default: 'train'.
        num_points (int, optional): Number of points in each data input.
            Default: 8192.
        label_weight_func (function, optional): Function to compute the
            label weight. Default: None.
    """

    def __init__(self,
                 data_root,
                 ann_file,
                 split='train',
                 num_points=8192,
                 label_weight_func=None):
        self.data_root = data_root
        self.data_infos = mmcv.load(ann_file)
        self.split = split
        assert split in ['train', 'val', 'test']
        self.num_points = num_points

        self.all_ids = np.arange(41)  # all possible ids
        self.cat_ids = np.array([
            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 24, 28, 33, 34,
            36, 39
        ])  # used for seg task
        self.ignore_index = len(self.cat_ids)

        # Map every raw ScanNet id to its training class; ids not in
        # `cat_ids` fall through to `ignore_index`.
        # Fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
        # use the explicit `np.int64` instead.
        self.cat_id2class = np.ones(
            (self.all_ids.shape[0], ), dtype=np.int64) * \
            self.ignore_index
        for i, cat_id in enumerate(self.cat_ids):
            self.cat_id2class[cat_id] = i

        # label weighting function is taken from
        # https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24
        self.label_weight_func = (lambda x: 1.0 / np.log(1.2 + x)) if \
            label_weight_func is None else label_weight_func

    def get_seg_infos(self):
        """Compute and save resampled scene indices and label weights.

        Writes ``{split}_resampled_scene_idxs.npy`` and
        ``{split}_label_weight.npy`` under ``data_root/seg_info``.
        Does nothing for the 'test' split, which has no labels.
        """
        if self.split == 'test':
            return
        scene_idxs, label_weight = self.get_scene_idxs_and_label_weight()
        save_folder = osp.join(self.data_root, 'seg_info')
        mmcv.mkdir_or_exist(save_folder)
        np.save(
            osp.join(save_folder, f'{self.split}_resampled_scene_idxs.npy'),
            scene_idxs)
        np.save(
            osp.join(save_folder, f'{self.split}_label_weight.npy'),
            label_weight)
        print(f'{self.split} resampled scene index and label weight saved')

    def _convert_to_label(self, mask):
        """Convert class_id in loaded segmentation mask to label.

        ``mask`` may be an ndarray of raw ids or a path to a ``.npy`` /
        raw int64 binary file holding them.
        """
        if isinstance(mask, str):
            if mask.endswith('npy'):
                mask = np.load(mask)
            else:
                mask = np.fromfile(mask, dtype=np.int64)
        label = self.cat_id2class[mask]
        return label

    def get_scene_idxs_and_label_weight(self):
        """Compute scene_idxs for data sampling and label weight for loss
        calculation.

        We sample more times for scenes with more points. Label_weight is
        inversely proportional to number of class points.
        """
        num_classes = len(self.cat_ids)
        num_point_all = []
        label_weight = np.zeros((num_classes + 1, ))  # ignore_index
        for data_info in self.data_infos:
            label = self._convert_to_label(
                osp.join(self.data_root, data_info['pts_semantic_mask_path']))
            num_point_all.append(label.shape[0])
            class_count, _ = np.histogram(label, range(num_classes + 2))
            label_weight += class_count

        # repeat scene_idx for num_scene_point // num_sample_point times
        sample_prob = np.array(num_point_all) / float(np.sum(num_point_all))
        num_iter = int(np.sum(num_point_all) / float(self.num_points))
        scene_idxs = []
        for idx in range(len(self.data_infos)):
            scene_idxs.extend([idx] * int(round(sample_prob[idx] * num_iter)))
        scene_idxs = np.array(scene_idxs).astype(np.int32)

        # calculate label weight, adopted from PointNet++
        label_weight = label_weight[:-1].astype(np.float32)
        label_weight = label_weight / label_weight.sum()
        label_weight = self.label_weight_func(label_weight).astype(np.float32)

        return scene_idxs, label_weight
autonomous_driving/openlane-v2/tools/data_converter/sunrgbd_data_utils.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
from
concurrent
import
futures
as
futures
from
os
import
path
as
osp
import
mmcv
import
numpy
as
np
from
scipy
import
io
as
sio
def random_sampling(points, num_points, replace=None):
    """Randomly sample a point cloud to a fixed number of points.

    Args:
        points (ndarray): Point cloud of shape (N, ...).
        num_points (int): Number of samples to draw. A negative value
            disables sampling and returns ``points`` unchanged.
        replace (bool, optional): Whether to sample with replacement.
            When None, replacement is enabled automatically whenever the
            cloud holds fewer points than requested.

    Returns:
        ndarray: Point cloud after sampling.
    """
    # Negative sample counts mean "keep every point".
    if num_points < 0:
        return points
    total = points.shape[0]
    if replace is None:
        # Drawing more samples than available requires replacement.
        replace = total < num_points
    chosen = np.random.choice(total, num_points, replace=replace)
    return points[chosen]
class SUNRGBDInstance(object):
    """A single annotated object parsed from one SUN RGB-D label line.

    Each whitespace-separated line holds: class name, 2D box as
    (xmin, ymin, width, height), 3D centroid, half-sizes and the in-plane
    orientation vector (x, y) in the depth coordinate system.
    """

    def __init__(self, line):
        fields = line.split(' ')
        # Everything after the class name is numeric.
        fields[1:] = [float(v) for v in fields[1:]]

        self.classname = fields[0]
        self.xmin = fields[1]
        self.ymin = fields[2]
        # 2D box stored as (x, y, w, h); convert to corner form.
        self.xmax = fields[1] + fields[3]
        self.ymax = fields[2] + fields[4]
        self.box2d = np.array([self.xmin, self.ymin, self.xmax, self.ymax])

        self.centroid = np.array([fields[5], fields[6], fields[7]])
        self.width = fields[8]
        self.length = fields[9]
        self.height = fields[10]
        # data[9] is x_size (length), data[8] is y_size (width), data[10] is
        # z_size (height) in our depth coordinate system,
        # l corresponds to the size along the x axis
        self.size = np.array([fields[9], fields[8], fields[10]]) * 2

        # Orientation is a unit vector in the xy-plane; its angle is the
        # box heading.
        self.orientation = np.zeros((3, ))
        self.orientation[0] = fields[11]
        self.orientation[1] = fields[12]
        self.heading_angle = np.arctan2(self.orientation[1],
                                        self.orientation[0])
        # 7-DoF box: (cx, cy, cz, l, w, h, yaw).
        self.box3d = np.concatenate(
            [self.centroid, self.size, self.heading_angle[None]])
class SUNRGBDData(object):
    """SUNRGBD data.

    Generate scannet infos for sunrgbd_converter.

    Args:
        root_path (str): Root path of the raw data.
        split (str, optional): Set split type of the data. Default: 'train'.
        use_v1 (bool, optional): Whether to use v1. Default: False.
        num_points (int, optional): Number of points to sample. Set to -1
            to utilize all points. Defaults to -1.
    """

    def __init__(self, root_path, split='train', use_v1=False, num_points=-1):
        self.root_dir = root_path
        self.split = split
        self.split_dir = osp.join(root_path, 'sunrgbd_trainval')
        self.num_points = num_points
        self.classes = [
            'bed', 'table', 'sofa', 'chair', 'toilet', 'desk', 'dresser',
            'night_stand', 'bookshelf', 'bathtub'
        ]
        self.cat2label = {cat: self.classes.index(cat) for cat in self.classes}
        self.label2cat = {
            label: self.classes[label]
            for label in range(len(self.classes))
        }
        assert split in ['train', 'val', 'test']
        split_file = osp.join(self.split_dir, f'{split}_data_idx.txt')
        mmcv.check_file_exist(split_file)
        # Fix: materialize the map into a list. A bare `map` object is a
        # single-use iterator with no length, so `len(self)` would raise
        # TypeError and a second traversal would silently yield nothing.
        self.sample_id_list = list(map(int, mmcv.list_from_file(split_file)))
        self.image_dir = osp.join(self.split_dir, 'image')
        self.calib_dir = osp.join(self.split_dir, 'calib')
        self.depth_dir = osp.join(self.split_dir, 'depth')
        if use_v1:
            self.label_dir = osp.join(self.split_dir, 'label_v1')
        else:
            self.label_dir = osp.join(self.split_dir, 'label')

    def __len__(self):
        """Return the number of samples in the split."""
        return len(self.sample_id_list)

    def get_image(self, idx):
        """Load the RGB image of sample ``idx``."""
        img_filename = osp.join(self.image_dir, f'{idx:06d}.jpg')
        return mmcv.imread(img_filename)

    def get_image_shape(self, idx):
        """Return (height, width) of the image of sample ``idx``."""
        image = self.get_image(idx)
        return np.array(image.shape[:2], dtype=np.int32)

    def get_depth(self, idx):
        """Load the depth point cloud of sample ``idx`` from its .mat file."""
        depth_filename = osp.join(self.depth_dir, f'{idx:06d}.mat')
        depth = sio.loadmat(depth_filename)['instance']
        return depth

    def get_calibration(self, idx):
        """Load calibration of sample ``idx``.

        Returns:
            tuple[ndarray, ndarray]: Intrinsics ``K`` and rotation ``Rt``,
                both (3, 3) float32; the calib file stores them
                column-major, hence ``order='F'``.
        """
        calib_filepath = osp.join(self.calib_dir, f'{idx:06d}.txt')
        lines = [line.rstrip() for line in open(calib_filepath)]
        Rt = np.array([float(x) for x in lines[0].split(' ')])
        Rt = np.reshape(Rt, (3, 3), order='F').astype(np.float32)
        K = np.array([float(x) for x in lines[1].split(' ')])
        K = np.reshape(K, (3, 3), order='F').astype(np.float32)
        return K, Rt

    def get_label_objects(self, idx):
        """Parse the label file of sample ``idx`` into SUNRGBDInstance
        objects, one per line."""
        label_filename = osp.join(self.label_dir, f'{idx:06d}.txt')
        lines = [line.rstrip() for line in open(label_filename)]
        objects = [SUNRGBDInstance(line) for line in lines]
        return objects

    def get_infos(self, num_workers=4, has_label=True, sample_id_list=None):
        """Get data infos.

        This method gets information from the raw data.

        Args:
            num_workers (int, optional): Number of threads to be used.
                Default: 4.
            has_label (bool, optional): Whether the data has label.
                Default: True.
            sample_id_list (list[int], optional): Index list of the sample.
                Default: None.

        Returns:
            infos (list[dict]): Information of the raw data.
        """

        def process_single_scene(sample_idx):
            # Build the info dict for one sample: dump its (subsampled)
            # point cloud to disk and collect image/calib/annotation meta.
            print(f'{self.split} sample_idx: {sample_idx}')
            # convert depth to points
            pc_upright_depth = self.get_depth(sample_idx)
            pc_upright_depth_subsampled = random_sampling(
                pc_upright_depth, self.num_points)

            info = dict()
            pc_info = {'num_features': 6, 'lidar_idx': sample_idx}
            info['point_cloud'] = pc_info

            mmcv.mkdir_or_exist(osp.join(self.root_dir, 'points'))
            pc_upright_depth_subsampled.tofile(
                osp.join(self.root_dir, 'points', f'{sample_idx:06d}.bin'))

            info['pts_path'] = osp.join('points', f'{sample_idx:06d}.bin')
            img_path = osp.join('image', f'{sample_idx:06d}.jpg')
            image_info = {
                'image_idx': sample_idx,
                'image_shape': self.get_image_shape(sample_idx),
                'image_path': img_path
            }
            info['image'] = image_info

            K, Rt = self.get_calibration(sample_idx)
            calib_info = {'K': K, 'Rt': Rt}
            info['calib'] = calib_info

            if has_label:
                obj_list = self.get_label_objects(sample_idx)
                annotations = {}
                # Only objects of the 10 known classes count as GT.
                annotations['gt_num'] = len([
                    obj.classname for obj in obj_list
                    if obj.classname in self.cat2label.keys()
                ])
                if annotations['gt_num'] != 0:
                    annotations['name'] = np.array([
                        obj.classname for obj in obj_list
                        if obj.classname in self.cat2label.keys()
                    ])
                    annotations['bbox'] = np.concatenate([
                        obj.box2d.reshape(1, 4) for obj in obj_list
                        if obj.classname in self.cat2label.keys()
                    ], axis=0)
                    annotations['location'] = np.concatenate([
                        obj.centroid.reshape(1, 3) for obj in obj_list
                        if obj.classname in self.cat2label.keys()
                    ], axis=0)
                    annotations['dimensions'] = 2 * np.array([
                        [obj.length, obj.width, obj.height]
                        for obj in obj_list
                        if obj.classname in self.cat2label.keys()
                    ])  # lwh (depth) format
                    annotations['rotation_y'] = np.array([
                        obj.heading_angle for obj in obj_list
                        if obj.classname in self.cat2label.keys()
                    ])
                    annotations['index'] = np.arange(
                        len(obj_list), dtype=np.int32)
                    annotations['class'] = np.array([
                        self.cat2label[obj.classname] for obj in obj_list
                        if obj.classname in self.cat2label.keys()
                    ])
                    annotations['gt_boxes_upright_depth'] = np.stack(
                        [
                            obj.box3d for obj in obj_list
                            if obj.classname in self.cat2label.keys()
                        ],
                        axis=0)  # (K,8)
                info['annos'] = annotations
            return info

        sample_id_list = sample_id_list if \
            sample_id_list is not None else self.sample_id_list
        with futures.ThreadPoolExecutor(num_workers) as executor:
            infos = executor.map(process_single_scene, sample_id_list)
        return list(infos)
autonomous_driving/openlane-v2/tools/data_converter/waymo_converter.py
0 → 100644
View file @
305e110f
This diff is collapsed.
Click to expand it.
autonomous_driving/openlane-v2/tools/deployment/mmdet3d2torchserve.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
from
argparse
import
ArgumentParser
,
Namespace
from
pathlib
import
Path
from
tempfile
import
TemporaryDirectory
import
mmcv
try
:
from
model_archiver.model_packaging
import
package_model
from
model_archiver.model_packaging_utils
import
ModelExportUtils
except
ImportError
:
package_model
=
None
def mmdet3d2torchserve(
    config_file: str,
    checkpoint_file: str,
    output_folder: str,
    model_name: str,
    model_version: str = '1.0',
    force: bool = False,
):
    """Converts MMDetection3D model (config + checkpoint) to TorchServe `.mar`.

    Args:
        config_file (str):
            In MMDetection3D config format.
            The contents vary for each task repository.
        checkpoint_file (str):
            In MMDetection3D checkpoint format.
            The contents vary for each task repository.
        output_folder (str):
            Folder where `{model_name}.mar` will be created.
            The file created will be in TorchServe archive format.
        model_name (str):
            If not None, used for naming the `{model_name}.mar` file
            that will be created under `output_folder`.
            If None, `{Path(checkpoint_file).stem}` will be used.
        model_version (str, optional):
            Model's version. Default: '1.0'.
        force (bool, optional):
            If True, if there is an existing `{model_name}.mar`
            file under `output_folder` it will be overwritten.
            Default: False.
    """
    mmcv.mkdir_or_exist(output_folder)

    config = mmcv.Config.fromfile(config_file)

    with TemporaryDirectory() as tmpdir:
        # The archiver needs a real file on disk, so dump the parsed
        # config into the temporary directory first.
        config.dump(f'{tmpdir}/config.py')

        # The serving handler lives next to this script.
        handler_path = f'{Path(__file__).parent}/mmdet3d_handler.py'
        archive_name = model_name or Path(checkpoint_file).stem

        packaging_options = {
            'model_file': f'{tmpdir}/config.py',
            'serialized_file': checkpoint_file,
            'handler': handler_path,
            'model_name': archive_name,
            'version': model_version,
            'export_path': output_folder,
            'force': force,
            'requirements_file': None,
            'extra_files': None,
            'runtime': 'python',
            'archive_format': 'default'
        }
        args = Namespace(**packaging_options)
        manifest = ModelExportUtils.generate_manifest_json(args)
        package_model(args, manifest)
def parse_args():
    """Parse command-line options for the TorchServe conversion script.

    Returns:
        argparse.Namespace: Parsed arguments (config, checkpoint,
            output_folder, model_name, model_version, force).
    """
    parser = ArgumentParser(
        description='Convert MMDetection models to TorchServe `.mar` format.')
    parser.add_argument('config', type=str, help='config file path')
    parser.add_argument('checkpoint', type=str, help='checkpoint file path')
    parser.add_argument(
        '--output-folder',
        type=str,
        required=True,
        help='Folder where `{model_name}.mar` will be created.')
    parser.add_argument(
        '--model-name',
        type=str,
        default=None,
        help='If not None, used for naming the `{model_name}.mar`'
        'file that will be created under `output_folder`.'
        'If None, `{Path(checkpoint_file).stem}` will be used.')
    parser.add_argument(
        '--model-version',
        type=str,
        default='1.0',
        help='Number used for versioning.')
    parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        help='overwrite the existing `{model_name}.mar`')
    return parser.parse_args()
# Script entry point: parse CLI args and package the model for TorchServe.
if __name__ == '__main__':
    args = parse_args()

    # `package_model` is None when the optional `model_archiver` import at
    # the top of the file failed; fail early with an actionable message
    # before doing any work.
    if package_model is None:
        raise ImportError('`torch-model-archiver` is required.'
                          'Try: pip install torch-model-archiver')

    mmdet3d2torchserve(args.config, args.checkpoint, args.output_folder,
                       args.model_name, args.model_version, args.force)
autonomous_driving/openlane-v2/tools/deployment/mmdet3d_handler.py
0 → 100644
View file @
305e110f
# Copyright (c) OpenMMLab. All rights reserved.
import
base64
import
os
import
numpy
as
np
import
torch
from
ts.torch_handler.base_handler
import
BaseHandler
from
mmdet3d.apis
import
inference_detector
,
init_model
from
mmdet3d.core.points
import
get_points_type
class MMdet3dHandler(BaseHandler):
    """MMDetection3D Handler used in TorchServe.

    Handler to load models in MMDetection3D, and it will process data to get
    predicted results. For now, it only supports SECOND.
    """
    # Class-level defaults: score filter threshold, raw point layout and
    # which of the loaded dimensions are fed to the model.
    threshold = 0.5
    load_dim = 4
    use_dim = [0, 1, 2, 3]
    coord_type = 'LIDAR'
    attribute_dims = None

    def initialize(self, context):
        """Initialize function loads the model in MMDetection3D.

        Args:
            context (context): It is a JSON Object containing information
                pertaining to the model artifacts parameters.
        """
        properties = context.system_properties
        self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        if torch.cuda.is_available():
            # Pin the model to the GPU id TorchServe assigned us.
            gpu_id = properties.get('gpu_id')
            self.device = torch.device(self.map_location + ':' + str(gpu_id))
        else:
            self.device = torch.device(self.map_location)
        self.manifest = context.manifest

        # Resolve checkpoint and config paths inside the unpacked archive.
        model_dir = properties.get('model_dir')
        serialized_file = self.manifest['model']['serializedFile']
        checkpoint = os.path.join(model_dir, serialized_file)
        self.config_file = os.path.join(model_dir, 'config.py')
        self.model = init_model(self.config_file, checkpoint, self.device)
        self.initialized = True

    def preprocess(self, data):
        """Preprocess function converts data into LiDARPoints class.

        Args:
            data (List): Input data from the request.

        Returns:
            `LiDARPoints` : The preprocess function returns the input
                point cloud data as LiDARPoints class.
        """
        for row in data:
            # Compat layer: normally the envelope should just return the data
            # directly, but older versions of Torchserve didn't have envelope.
            pts = row.get('data') or row.get('body')
            if isinstance(pts, str):
                pts = base64.b64decode(pts)

            # Decode the raw buffer into an (N, load_dim) float32 array and
            # keep only the dimensions the model consumes.
            raw = np.frombuffer(pts, dtype=np.float32)
            raw = raw.reshape(-1, self.load_dim)
            raw = raw[:, self.use_dim]
            points_class = get_points_type(self.coord_type)
            points = points_class(
                raw,
                points_dim=raw.shape[-1],
                attribute_dims=self.attribute_dims)

        return points

    def inference(self, data):
        """Inference Function.

        This function is used to make a prediction call on the
        given input request.

        Args:
            data (`LiDARPoints`): LiDARPoints class passed to make
                the inference request.

        Returns:
            List(dict) : The predicted result is returned in this function.
        """
        results, _ = inference_detector(self.model, data)
        return results

    def postprocess(self, data):
        """Postprocess function.

        This function makes use of the output from the inference and
        converts it into a torchserve supported response output.

        Args:
            data (List[dict]): The data received from the prediction
                output of the model.

        Returns:
            List: The post process function returns a list of the predicted
                output.
        """
        output = []
        for pts_index, result in enumerate(data):
            output.append([])
            # Multi-modality results nest box predictions under 'pts_bbox'.
            box_result = result['pts_bbox'] if 'pts_bbox' in result.keys() \
                else result
            pred_bboxes = box_result['boxes_3d'].tensor.numpy()
            pred_scores = box_result['scores_3d'].numpy()

            # Keep only detections above the score threshold.
            keep = pred_scores > self.threshold
            bbox_coords = pred_bboxes[keep].tolist()
            score = pred_scores[keep].tolist()
            output[pts_index].append({'3dbbox': bbox_coords, 'score': score})

        return output
Prev
1
2
Next
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment