OpenDAS / OpenPCDet · Commits

Commit 1574d4a4
Authored Jul 17, 2022 by YangXiuyu

Add custom dataset support

For KITTI-like custom datasets, this will help.

Parent: 8c6e8890
Showing 5 changed files with 457 additions and 1 deletion (+457, −1).
pcdet/datasets/__init__.py (+3, −1)
pcdet/datasets/custom/__init__.py (+0, −0)
pcdet/datasets/custom/custom_dataset.py (+371, −0)
pcdet/datasets/custom/custom_utils.py (+0, −0)
pcdet/utils/object3d_custom.py (+83, −0)
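
The new dataset class expects a KITTI-like directory layout. The directory and file names below are taken from the code in this commit (ImageSets split files, velodyne point clouds, label_2 labels); the data/custom root itself is only the default used by the __main__ block and may differ in your setup:

data/custom/
├── ImageSets/
│   ├── train.txt
│   └── test.txt
├── training/
│   ├── velodyne/   (*.bin point clouds: float32 x, y, z, intensity)
│   └── label_2/    (*.txt labels in the labelCloud KITTI-like format)
└── testing/
    └── velodyne/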
pcdet/datasets/__init__.py (view file @ 1574d4a4)

@@ -11,6 +11,7 @@ from .nuscenes.nuscenes_dataset import NuScenesDataset
 from .waymo.waymo_dataset import WaymoDataset
 from .pandaset.pandaset_dataset import PandasetDataset
 from .lyft.lyft_dataset import LyftDataset
+from .custom.custom_dataset import CustomDataset

 __all__ = {
     'DatasetTemplate': DatasetTemplate,
@@ -18,7 +19,8 @@ __all__ = {
     'NuScenesDataset': NuScenesDataset,
     'WaymoDataset': WaymoDataset,
     'PandasetDataset': PandasetDataset,
-    'LyftDataset': LyftDataset
+    'LyftDataset': LyftDataset,
+    'CustomDataset': CustomDataset
 }
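
Registering CustomDataset in the __all__ dict is what lets a dataset config select the new class by name. As a rough illustration of how this registry is consumed (a minimal sketch, simplified from how OpenPCDet's dataset builder typically looks classes up, not a verbatim copy of it):

from pcdet.datasets import __all__ as dataset_registry

def build_dataset(dataset_cfg, class_names, root_path, training, logger=None):
    # illustrative only: dataset_cfg.DATASET would be 'CustomDataset' for the new dataset
    dataset_cls = dataset_registry[dataset_cfg.DATASET]
    return dataset_cls(dataset_cfg=dataset_cfg, class_names=class_names,
                       root_path=root_path, training=training, logger=logger)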
pcdet/datasets/custom/__init__.py (new file, mode 100644, empty; view file @ 1574d4a4)
pcdet/datasets/custom/custom_dataset.py (new file, mode 100644; view file @ 1574d4a4)
import copy
import pickle
from pathlib import Path  # used by create_groundtruth_database below

import numpy as np
from skimage import io

from . import custom_utils
from ...ops.roiaware_pool3d import roiaware_pool3d_utils
from ...utils import box_utils, common_utils, object3d_custom
from ..dataset import DatasetTemplate


class CustomDataset(DatasetTemplate):
    def __init__(self, dataset_cfg, class_names, training=True, root_path=None, logger=None, ext='.bin'):
        """
        Args:
            root_path: dataset root directory
            dataset_cfg: dataset config
            class_names: list of class names to detect
            training: whether the dataset is used for training
            logger: optional logger
            ext: point cloud file extension
        """
        super().__init__(
            dataset_cfg=dataset_cfg, class_names=class_names, training=training,
            root_path=root_path, logger=logger
        )
        self.split = self.dataset_cfg.DATA_SPLIT[self.mode]
        # keep this a Path so the '/' joins in get_label/get_lidar work
        self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')

        split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
        self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None

        self.custom_infos = []
        self.include_custom_data(self.mode)
        self.ext = ext

    def include_custom_data(self, mode):
        if self.logger is not None:
            self.logger.info('Loading Custom dataset.')
        custom_infos = []

        for info_path in self.dataset_cfg.INFO_PATH[mode]:
            info_path = self.root_path / info_path
            if not info_path.exists():
                continue
            with open(info_path, 'rb') as f:
                infos = pickle.load(f)
                custom_infos.extend(infos)

        self.custom_infos.extend(custom_infos)

        if self.logger is not None:
            self.logger.info('Total samples for CUSTOM dataset: %d' % (len(custom_infos)))

    def get_infos(self, num_workers=16, has_label=True, count_inside_pts=True, sample_id_list=None):
        import concurrent.futures as futures

        # Process a single scene and build its info dict
        def process_single_scene(sample_idx):
            print('%s sample_idx: %s' % (self.split, sample_idx))
            info = {}
            pc_info = {'num_features': 4, 'lidar_idx': sample_idx}
            info['point_cloud'] = pc_info

            # no images; calibs are needed to transform the labels
            type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3}
            if has_label:
                obj_list = self.get_label(sample_idx)
                annotations = {}
                annotations['name'] = np.array([obj.cls_type for obj in obj_list])  # 1-dimensional
                annotations['dimensions'] = np.array([[obj.l, obj.h, obj.w] for obj in obj_list])
                annotations['location'] = np.concatenate([obj.loc.reshape(1, 3) for obj in obj_list])
                annotations['rotation_y'] = np.array([obj.ry for obj in obj_list])  # 1-dimensional

                num_objects = len([obj.cls_type for obj in obj_list if obj.cls_type != 'DontCare'])
                num_gt = len(annotations['name'])
                index = list(range(num_objects)) + [-1] * (num_gt - num_objects)
                annotations['index'] = np.array(index, dtype=np.int32)

                loc = annotations['location'][:num_objects]
                dims = annotations['dimensions'][:num_objects]
                rots = annotations['rotation_y'][:num_objects]

                loc_lidar = self.get_calib(loc)
                l, h, w = dims[:, 0:1], dims[:, 1:2], dims[:, 2:3]
                # 2-dimensional (N, 7) array: x, y, z, l, w, h, heading
                gt_boxes_lidar = np.concatenate(
                    [loc_lidar, l, w, h, (np.pi / 2 - rots[..., np.newaxis])], axis=1)
                annotations['gt_boxes_lidar'] = gt_boxes_lidar
                info['annos'] = annotations

            return info

        sample_id_list = sample_id_list if sample_id_list is not None else self.sample_id_list
        # create a thread pool to speed up info generation
        with futures.ThreadPoolExecutor(num_workers) as executor:
            infos = executor.map(process_single_scene, sample_id_list)
        return list(infos)

    def get_calib(self, loc):
        """
        This 'calibration' is different from the KITTI dataset.
        The transform formula follows the label import of labelCloud
        (ROOT/labelCloud/io/labels/kitti.py):
            if self.transformed:
                centroid = centroid[2], -centroid[0], centroid[1] - 2.3
            dimensions = [float(v) for v in line_elements[8:11]]
            if self.transformed:
                dimensions = dimensions[2], dimensions[1], dimensions[0]
            bbox = BBox(*centroid, *dimensions)
        """
        loc_lidar = np.concatenate([
            np.array((float(loc_obj[2]), float(-loc_obj[0]), float(loc_obj[1] - 2.3)),
                     dtype=np.float32).reshape(1, 3)
            for loc_obj in loc
        ])
        return loc_lidar

    def get_label(self, idx):
        label_file = self.root_split_path / 'label_2' / ('%s.txt' % idx)
        assert label_file.exists()
        return object3d_custom.get_objects_from_label(label_file)

    def get_lidar(self, idx, getitem):
        """
        Loads the point cloud for a sample.
        Args:
            idx: index of the point cloud file to load.
            getitem: kept for the two call sites; both resolve to the same file.
        Returns:
            np.array(N, 4): point cloud.
        """
        lidar_file = self.root_split_path / 'velodyne' / ('%s.bin' % idx)
        return np.fromfile(str(lidar_file), dtype=np.float32).reshape(-1, 4)

    def set_split(self, split):
        super().__init__(
            dataset_cfg=self.dataset_cfg, class_names=self.class_names, training=self.training,
            root_path=self.root_path, logger=self.logger
        )
        self.split = split
        self.root_split_path = self.root_path / ('training' if self.split != 'test' else 'testing')

        split_dir = self.root_path / 'ImageSets' / (self.split + '.txt')
        self.sample_id_list = [x.strip() for x in open(split_dir).readlines()] if split_dir.exists() else None

    # Create the gt database for data augmentation
    def create_groundtruth_database(self, info_path=None, used_classes=None, split='train'):
        import torch

        database_save_path = Path(self.root_path) / ('gt_database' if split == 'train' else ('gt_database_%s' % split))
        db_info_save_path = Path(self.root_path) / ('custom_dbinfos_%s.pkl' % split)

        database_save_path.mkdir(parents=True, exist_ok=True)
        all_db_infos = {}

        with open(info_path, 'rb') as f:
            infos = pickle.load(f)

        # For each .bin file
        for k in range(len(infos)):
            print('gt_database sample: %d/%d' % (k + 1, len(infos)))
            info = infos[k]
            sample_idx = info['point_cloud']['lidar_idx']
            points = self.get_lidar(sample_idx, False)
            annos = info['annos']
            names = annos['name']
            gt_boxes = annos['gt_boxes_lidar']

            num_obj = gt_boxes.shape[0]
            point_indices = roiaware_pool3d_utils.points_in_boxes_cpu(
                torch.from_numpy(points[:, 0:3]), torch.from_numpy(gt_boxes)
            ).numpy()  # (nboxes, npoints)

            for i in range(num_obj):
                filename = '%s_%s_%d.bin' % (sample_idx, names[i], i)
                filepath = database_save_path / filename
                gt_points = points[point_indices[i] > 0]

                gt_points[:, :3] -= gt_boxes[i, :3]
                # binary mode: tofile writes raw float32 bytes
                with open(filepath, 'wb') as f:
                    gt_points.tofile(f)

                if (used_classes is None) or names[i] in used_classes:
                    db_path = str(filepath.relative_to(self.root_path))  # gt_database/xxxxx.bin
                    db_info = {'name': names[i], 'path': db_path, 'gt_idx': i,
                               'box3d_lidar': gt_boxes[i], 'num_points_in_gt': gt_points.shape[0]}
                    if names[i] in all_db_infos:
                        all_db_infos[names[i]].append(db_info)
                    else:
                        all_db_infos[names[i]] = [db_info]

        # Print the number of database entries per class
        for k, v in all_db_infos.items():
            print('Database %s: %d' % (k, len(v)))

        with open(db_info_save_path, 'wb') as f:
            pickle.dump(all_db_infos, f)

    @staticmethod
    def generate_prediction_dicts(batch_dict, pred_dicts, class_names, output_path=None):
        """
        Args:
            batch_dict:
                frame_id:
            pred_dicts: list of pred_dicts
                pred_boxes: (N, 7), Tensor
                pred_scores: (N), Tensor
                pred_labels: (N), Tensor
            class_names:
            output_path:
        Returns:
        """
        def get_template_prediction(num_samples):
            ret_dict = {
                'name': np.zeros(num_samples), 'alpha': np.zeros(num_samples),
                # 'bbox' placeholder: the KITTI-style text dump below reads this key
                'bbox': np.zeros([num_samples, 4]),
                'dimensions': np.zeros([num_samples, 3]), 'location': np.zeros([num_samples, 3]),
                'rotation_y': np.zeros(num_samples), 'score': np.zeros(num_samples),
                'boxes_lidar': np.zeros([num_samples, 7])
            }
            return ret_dict

        def generate_single_sample_dict(batch_index, box_dict):
            pred_scores = box_dict['pred_scores'].cpu().numpy()
            pred_boxes = box_dict['pred_boxes'].cpu().numpy()
            pred_labels = box_dict['pred_labels'].cpu().numpy()
            # Start from an empty template dict; pred_scores.shape[0] is num_samples
            pred_dict = get_template_prediction(pred_scores.shape[0])
            # If num_samples is zero, return the empty dict
            if pred_scores.shape[0] == 0:
                return pred_dict

            # No calibration files: boxes are converted straight from lidar coordinates
            pred_boxes_camera = box_utils.boxes3d_lidar_to_kitti_camera(pred_boxes)

            pred_dict['name'] = np.array(class_names)[pred_labels - 1]
            pred_dict['alpha'] = -np.arctan2(-pred_boxes[:, 1], pred_boxes[:, 0]) + pred_boxes_camera[:, 6]
            pred_dict['dimensions'] = pred_boxes_camera[:, 3:6]
            pred_dict['location'] = pred_boxes_camera[:, 0:3]
            pred_dict['rotation_y'] = pred_boxes_camera[:, 6]
            pred_dict['score'] = pred_scores
            pred_dict['boxes_lidar'] = pred_boxes
            return pred_dict

        annos = []
        for index, box_dict in enumerate(pred_dicts):
            frame_id = batch_dict['frame_id'][index]
            single_pred_dict = generate_single_sample_dict(index, box_dict)
            single_pred_dict['frame_id'] = frame_id
            annos.append(single_pred_dict)

            # Write the predictions for this frame to a KITTI-style .txt file under output_path
            if output_path is not None:
                cur_det_file = output_path / ('%s.txt' % frame_id)
                with open(cur_det_file, 'w') as f:
                    bbox = single_pred_dict['bbox']
                    loc = single_pred_dict['location']
                    dims = single_pred_dict['dimensions']  # lhw -> hwl: lidar -> camera

                    for idx in range(len(bbox)):
                        print('%s -1 -1 %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f %.4f'
                              % (single_pred_dict['name'][idx], single_pred_dict['alpha'][idx],
                                 bbox[idx][0], bbox[idx][1], bbox[idx][2], bbox[idx][3],
                                 dims[idx][1], dims[idx][2], dims[idx][0],
                                 loc[idx][0], loc[idx][1], loc[idx][2],
                                 single_pred_dict['rotation_y'][idx], single_pred_dict['score'][idx]),
                              file=f)

        return annos

    def __len__(self):
        if self._merge_all_iters_to_one_epoch:
            return len(self.sample_id_list) * self.total_epochs

        return len(self.custom_infos)

    def __getitem__(self, index):
        """
        Reads the 'velodyne' folder for point clouds and the 'label_2' folder
        for labels, and returns a dict.
        """
        if self._merge_all_iters_to_one_epoch:
            index = index % len(self.custom_infos)

        info = copy.deepcopy(self.custom_infos[index])
        sample_idx = info['point_cloud']['lidar_idx']
        get_item_list = self.dataset_cfg.get('GET_ITEM_LIST', ['points'])

        input_dict = {
            'frame_id': self.sample_id_list[index],
        }

        # The infos here were generated by get_infos
        if 'annos' in info:
            annos = info['annos']
            annos = common_utils.drop_info_with_name(annos, name='DontCare')
            loc, dims, rots = annos['location'], annos['dimensions'], annos['rotation_y']
            gt_names = annos['name']
            gt_boxes_lidar = annos['gt_boxes_lidar']
            input_dict.update({
                'gt_names': gt_names,
                'gt_boxes': gt_boxes_lidar
            })

        if 'points' in get_item_list:
            points = self.get_lidar(sample_idx, True)
            input_dict['points'] = points

        data_dict = self.prepare_data(data_dict=input_dict)
        return data_dict


def create_custom_infos(dataset_cfg, class_names, data_path, save_path, workers=4):
    dataset = CustomDataset(dataset_cfg=dataset_cfg, class_names=class_names, root_path=data_path, training=False)
    train_split, val_split = 'train', 'val'

    # No evaluation
    train_filename = save_path / ('custom_infos_%s.pkl' % train_split)
    val_filename = save_path / ('custom_infos_%s.pkl' % val_split)
    trainval_filename = save_path / 'custom_infos_trainval.pkl'
    test_filename = save_path / 'custom_infos_test.pkl'

    print('------------------------Start to generate data infos------------------------')

    dataset.set_split(train_split)
    custom_infos_train = dataset.get_infos(num_workers=workers, has_label=True, count_inside_pts=True)
    with open(train_filename, 'wb') as f:
        pickle.dump(custom_infos_train, f)
    print('Custom info train file is saved to %s' % train_filename)

    dataset.set_split('test')
    custom_infos_test = dataset.get_infos(num_workers=workers, has_label=False, count_inside_pts=False)
    with open(test_filename, 'wb') as f:
        pickle.dump(custom_infos_test, f)
    print('Custom info test file is saved to %s' % test_filename)

    print('------------------------Start to create groundtruth database for data augmentation------------------------')
    dataset.set_split(train_split)
    dataset.create_groundtruth_database(train_filename, split=train_split)
    print('------------------------Data preparation done------------------------')


if __name__ == '__main__':
    import sys
    if len(sys.argv) > 1 and sys.argv[1] == 'create_custom_infos':
        import yaml
        from easydict import EasyDict

        dataset_cfg = EasyDict(yaml.safe_load(open(sys.argv[2])))
        ROOT_DIR = (Path(__file__).resolve().parent / '../../../').resolve()
        create_custom_infos(
            dataset_cfg=dataset_cfg,
            class_names=['Car', 'Pedestrian', 'Cyclist'],
            data_path=ROOT_DIR / 'data' / 'custom',
            save_path=ROOT_DIR / 'data' / 'custom'
        )
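
Following the pattern of the KITTI dataset, the __main__ block above suggests running info generation once before training, e.g. python -m pcdet.datasets.custom.custom_dataset create_custom_infos <dataset_config.yaml>. A minimal sketch of the equivalent call from Python (it simply mirrors the __main__ block; the YAML path below is a placeholder for your own dataset config):

import yaml
from pathlib import Path
from easydict import EasyDict

from pcdet.datasets.custom.custom_dataset import create_custom_infos

# placeholder config path; point it at your own dataset config YAML
dataset_cfg = EasyDict(yaml.safe_load(open('tools/cfgs/dataset_configs/custom_dataset.yaml')))
create_custom_infos(
    dataset_cfg=dataset_cfg,
    class_names=['Car', 'Pedestrian', 'Cyclist'],
    data_path=Path('data/custom'),
    save_path=Path('data/custom'),
)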
pcdet/datasets/custom/custom_utils.py (new file, mode 100644, empty; view file @ 1574d4a4)
pcdet/utils/object3d_custom.py (new file, mode 100644; view file @ 1574d4a4)
import numpy as np


def get_objects_from_label(label_file):
    with open(label_file, 'r') as f:
        lines = f.readlines()
    objects = [Object3d(line) for line in lines]
    return objects


def cls_type_to_id(cls_type):
    type_to_id = {'Car': 1, 'Pedestrian': 2, 'Cyclist': 3, 'Van': 4}
    if cls_type not in type_to_id.keys():
        return -1
    return type_to_id[cls_type]


class Object3d(object):
    def __init__(self, line):
        label = line.strip().split(' ')
        self.src = line
        self.cls_type = label[0]
        self.cls_id = cls_type_to_id(self.cls_type)
        self.truncation = float(label[1])
        self.occlusion = float(label[2])  # 0: fully visible, 1: partly occluded, 2: largely occluded, 3: unknown
        self.alpha = float(label[3])
        self.box2d = np.array((float(label[4]), float(label[5]), float(label[6]), float(label[7])), dtype=np.float32)
        self.h = float(label[8])
        self.w = float(label[9])
        self.l = float(label[10])
        self.loc = np.array((float(label[11]), float(label[12]), float(label[13])), dtype=np.float32)
        self.dis_to_cam = np.linalg.norm(self.loc)
        self.ry = float(label[14])
        self.score = float(label[15]) if len(label) == 16 else -1.0
        self.level_str = None
        self.level = self.get_custom_obj_level()

    def get_custom_obj_level(self):
        height = float(self.box2d[3]) - float(self.box2d[1]) + 1

        if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0:
            self.level_str = 'Easy'
            return 0  # Easy
        elif height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1:
            self.level_str = 'Moderate'
            return 1  # Moderate
        elif height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2:
            self.level_str = 'Hard'
            return 2  # Hard
        else:
            self.level_str = 'UnKnown'
            return -1

    def generate_corners3d(self):
        """
        Generate the corners3d representation of this object.
        :return corners_3d: (8, 3) corners of the box3d in camera coords
        """
        l, h, w = self.l, self.h, self.w
        x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
        y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
        z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]

        R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)],
                      [0, 1, 0],
                      [-np.sin(self.ry), 0, np.cos(self.ry)]])
        corners3d = np.vstack([x_corners, y_corners, z_corners])  # (3, 8)
        corners3d = np.dot(R, corners3d).T
        corners3d = corners3d + self.loc
        return corners3d

    def to_str(self):
        print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
                    % (self.cls_type, self.truncation, self.occlusion, self.alpha, self.box2d,
                       self.h, self.w, self.l, self.loc, self.ry)
        return print_str

    def to_custom_format(self):
        custom_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
                     % (self.cls_type, self.truncation, int(self.occlusion), self.alpha,
                        self.box2d[0], self.box2d[1], self.box2d[2], self.box2d[3],
                        self.h, self.w, self.l, self.loc[0], self.loc[1], self.loc[2], self.ry)
        return custom_str
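
A quick way to sanity-check the parser is to feed Object3d a synthetic 15-field line; all the numbers below are made up for illustration:

from pcdet.utils.object3d_custom import Object3d

# Synthetic KITTI-style label line; fields are:
# type truncation occlusion alpha bbox(x1 y1 x2 y2) h w l x y z ry
line = 'Car 0.00 0 -1.58 587.01 173.33 614.12 200.12 1.65 1.67 3.64 -0.65 1.71 46.70 -1.59'
obj = Object3d(line)
print(obj.cls_type, obj.cls_id, obj.level_str)  # Car 1 Moderate (2D height is about 27.8 px)
print(obj.generate_corners3d().shape)           # (8, 3)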