OpenDAS / mmdetection3d · Commits · Commit 2eebdc2d

[Refactor] Main code modification for coordinate system refactor (#677)

Authored Jul 22, 2021 by Yezhen Cong; committed by Tai-Wang on Sep 24, 2021.
Parent: 26ab7ff2
Changes: 97 files in this commit. Showing 20 changed files on this page, with 496 additions and 138 deletions (+496 −138).
mmdet3d/core/bbox/structures/utils.py                    +113  −51
mmdet3d/core/bbox/transforms.py                            +2   −2
mmdet3d/core/points/base_points.py                        +10  −22
mmdet3d/core/utils/__init__.py                             +5   −1
mmdet3d/core/utils/array_converter.py                    +320   −0
mmdet3d/core/visualizer/show_result.py                     +2   −4
mmdet3d/datasets/kitti_dataset.py                          +0   −2
mmdet3d/datasets/lyft_dataset.py                           +4   −4
mmdet3d/datasets/nuscenes_dataset.py                       +4   −4
mmdet3d/datasets/pipelines/data_augment_utils.py          +10  −10
mmdet3d/datasets/pipelines/formating.py                    +2   −1
mmdet3d/datasets/pipelines/transforms_3d.py                +3   −1
mmdet3d/datasets/waymo_dataset.py                          +0   −1
mmdet3d/models/dense_heads/anchor3d_head.py                +3   −3
mmdet3d/models/dense_heads/anchor_free_mono3d_head.py      +1   −0
mmdet3d/models/dense_heads/fcos_mono3d_head.py             +7   −2
mmdet3d/models/dense_heads/free_anchor3d_head.py           +1   −0
mmdet3d/models/dense_heads/groupfree3d_head.py             +1   −1
mmdet3d/models/dense_heads/parta2_rpn_head.py              +3   −3
mmdet3d/models/dense_heads/ssd_3d_head.py                  +5  −26
mmdet3d/core/bbox/structures/utils.py  (view file @ 2eebdc2d)
@@ -3,84 +3,138 @@ import numpy as np
 import torch
+from logging import warning
+
+from mmdet3d.core.utils import array_converter
 
 
+@array_converter(apply_to=('val', ))
 def limit_period(val, offset=0.5, period=np.pi):
     """Limit the value into a period for periodic function.
 
     Args:
-        val (torch.Tensor): The value to be converted.
-        offset (float, optional): Offset to set the value range. \
+        val (torch.Tensor | np.ndarray): The value to be converted.
+        offset (float, optional): Offset to set the value range.
             Defaults to 0.5.
         period ([type], optional): Period of the value. Defaults to np.pi.
 
     Returns:
-        torch.Tensor: Value in the range of \
+        (torch.Tensor | np.ndarray): Value in the range of
             [-offset * period, (1-offset) * period]
     """
-    return val - torch.floor(val / period + offset) * period
+    limited_val = val - torch.floor(val / period + offset) * period
+    return limited_val
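With the new `array_converter` decorator, `limit_period` accepts either NumPy arrays or PyTorch tensors and returns the same type it was given. A minimal usage sketch (values chosen arbitrarily):

    import numpy as np
    import torch
    from mmdet3d.core.bbox.structures.utils import limit_period

    # NumPy input -> NumPy output; with period=2*pi and offset=0.5 the
    # angles are wrapped into [-pi, pi).
    angles_np = np.array([3.5 * np.pi, -2.25 * np.pi])
    print(limit_period(angles_np, offset=0.5, period=2 * np.pi))

    # Torch input -> Torch output, same wrapping.
    angles_t = torch.tensor([3.5 * np.pi, -2.25 * np.pi])
    print(limit_period(angles_t, offset=0.5, period=2 * np.pi))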
-def rotation_3d_in_axis(points, angles, axis=0):
+@array_converter(apply_to=('points', 'angles'))
+def rotation_3d_in_axis(points,
+                        angles,
+                        axis=0,
+                        return_mat=False,
+                        clockwise=False):
     """Rotate points by angles according to axis.
 
     Args:
-        points (torch.Tensor): Points of shape (N, M, 3).
-        angles (torch.Tensor): Vector of angles in shape (N,)
+        points (np.ndarray | torch.Tensor | list | tuple ):
+            Points of shape (N, M, 3).
+        angles (np.ndarray | torch.Tensor | list | tuple | float):
+            Vector of angles in shape (N,)
         axis (int, optional): The axis to be rotated. Defaults to 0.
+        return_mat: Whether or not return the rotation matrix (transposed).
+            Defaults to False.
+        clockwise: Whether the rotation is clockwise. Defaults to False.
 
     Raises:
-        ValueError: when the axis is not in range [0, 1, 2], it will \
+        ValueError: when the axis is not in range [0, 1, 2], it will
             raise value error.
 
     Returns:
-        torch.Tensor: Rotated points in shape (N, M, 3)
+        (torch.Tensor | np.ndarray): Rotated points in shape (N, M, 3).
     """
+    batch_free = len(points.shape) == 2
+    if batch_free:
+        points = points[None]
+
+    if isinstance(angles, float) or len(angles.shape) == 0:
+        angles = torch.full(points.shape[:1], angles)
+
+    assert len(points.shape) == 3 and len(angles.shape) == 1 \
+        and points.shape[0] == angles.shape[0], f'Incorrect shape of points ' \
+        f'angles: {points.shape}, {angles.shape}'
+
+    assert points.shape[-1] in [2, 3], \
+        f'Points size should be 2 or 3 instead of {points.shape[-1]}'
+
     rot_sin = torch.sin(angles)
     rot_cos = torch.cos(angles)
     ones = torch.ones_like(rot_cos)
     zeros = torch.zeros_like(rot_cos)
-    if axis == 1:
-        rot_mat_T = torch.stack([
-            torch.stack([rot_cos, zeros, -rot_sin]),
-            torch.stack([zeros, ones, zeros]),
-            torch.stack([rot_sin, zeros, rot_cos])
-        ])
-    elif axis == 2 or axis == -1:
-        rot_mat_T = torch.stack([
-            torch.stack([rot_cos, -rot_sin, zeros]),
-            torch.stack([rot_sin, rot_cos, zeros]),
-            torch.stack([zeros, zeros, ones])
-        ])
-    elif axis == 0:
-        rot_mat_T = torch.stack([
-            torch.stack([zeros, rot_cos, -rot_sin]),
-            torch.stack([zeros, rot_sin, rot_cos]),
-            torch.stack([ones, zeros, zeros])
-        ])
+
+    if points.shape[-1] == 3:
+        if axis == 1 or axis == -2:
+            rot_mat_T = torch.stack([
+                torch.stack([rot_cos, zeros, rot_sin]),
+                torch.stack([zeros, ones, zeros]),
+                torch.stack([-rot_sin, zeros, rot_cos])
+            ])
+        elif axis == 2 or axis == -1:
+            rot_mat_T = torch.stack([
+                torch.stack([rot_cos, rot_sin, zeros]),
+                torch.stack([-rot_sin, rot_cos, zeros]),
+                torch.stack([zeros, zeros, ones])
+            ])
+        elif axis == 0 or axis == -3:
+            rot_mat_T = torch.stack([
+                torch.stack([ones, zeros, zeros]),
+                torch.stack([zeros, rot_cos, rot_sin]),
+                torch.stack([zeros, -rot_sin, rot_cos])
+            ])
+        else:
+            raise ValueError(f'axis should in range '
+                             f'[-3, -2, -1, 0, 1, 2], got {axis}')
     else:
-        raise ValueError(f'axis should in range [0, 1, 2], got {axis}')
+        rot_mat_T = torch.stack([
+            torch.stack([rot_cos, rot_sin]),
+            torch.stack([-rot_sin, rot_cos])
+        ])
+
+    if clockwise:
+        rot_mat_T = rot_mat_T.transpose(0, 1)
+
+    if points.shape[0] == 0:
+        points_new = points
+    else:
+        points_new = torch.einsum('aij,jka->aik', points, rot_mat_T)
 
-    return torch.einsum('aij,jka->aik', (points, rot_mat_T))
+    if batch_free:
+        points_new = points_new.squeeze(0)
+
+    if return_mat:
+        rot_mat_T = torch.einsum('jka->ajk', rot_mat_T)
+        if batch_free:
+            rot_mat_T = rot_mat_T.squeeze(0)
+        return points_new, rot_mat_T
+    else:
+        return points_new
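For reference, a small sketch of how the refactored signature can be exercised; shapes follow the docstring above (N groups of M points each), and all values are arbitrary:

    import numpy as np
    import torch
    from mmdet3d.core.bbox.structures.utils import rotation_3d_in_axis

    points = torch.rand(4, 8, 3)    # (N, M, 3)
    angles = torch.rand(4) * np.pi  # one yaw angle per group

    # Rotate about the z-axis (axis=2) and also request the transposed
    # rotation matrices that were applied.
    rotated, rot_mat_T = rotation_3d_in_axis(
        points, angles, axis=2, return_mat=True)
    print(rotated.shape, rot_mat_T.shape)
    # torch.Size([4, 8, 3]) torch.Size([4, 3, 3])

    # 2D (bird's-eye-view) corners with one angle per box work as well,
    # and NumPy inputs come back as NumPy arrays.
    corners = np.random.rand(4, 8, 2).astype(np.float32)
    angles_np = np.random.rand(4).astype(np.float32)
    print(rotation_3d_in_axis(corners, angles_np, axis=2).shape)  # (4, 8, 2)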
+@array_converter(apply_to=('boxes_xywhr', ))
 def xywhr2xyxyr(boxes_xywhr):
     """Convert a rotated boxes in XYWHR format to XYXYR format.
 
     Args:
-        boxes_xywhr (torch.Tensor): Rotated boxes in XYWHR format.
+        boxes_xywhr (torch.Tensor | np.ndarray): Rotated boxes in XYWHR format.
 
     Returns:
-        torch.Tensor: Converted boxes in XYXYR format.
+        (torch.Tensor | np.ndarray): Converted boxes in XYXYR format.
     """
     boxes = torch.zeros_like(boxes_xywhr)
-    half_w = boxes_xywhr[:, 2] / 2
-    half_h = boxes_xywhr[:, 3] / 2
+    half_w = boxes_xywhr[..., 2] / 2
+    half_h = boxes_xywhr[..., 3] / 2
 
-    boxes[:, 0] = boxes_xywhr[:, 0] - half_w
-    boxes[:, 1] = boxes_xywhr[:, 1] - half_h
-    boxes[:, 2] = boxes_xywhr[:, 0] + half_w
-    boxes[:, 3] = boxes_xywhr[:, 1] + half_h
-    boxes[:, 4] = boxes_xywhr[:, 4]
+    boxes[..., 0] = boxes_xywhr[..., 0] - half_w
+    boxes[..., 1] = boxes_xywhr[..., 1] - half_h
+    boxes[..., 2] = boxes_xywhr[..., 0] + half_w
+    boxes[..., 3] = boxes_xywhr[..., 1] + half_h
+    boxes[..., 4] = boxes_xywhr[..., 4]
     return boxes
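As a quick sanity check of the new `...` indexing (which also tolerates extra leading batch dimensions), a hypothetical BEV box in XYWHR format converts as follows; the numbers are arbitrary:

    import numpy as np
    from mmdet3d.core.bbox.structures.utils import xywhr2xyxyr

    # (cx, cy, w, h, yaw) -> (x1, y1, x2, y2, yaw)
    boxes_xywhr = np.array([[2.0, 3.0, 4.0, 2.0, 0.3]], dtype=np.float32)
    print(xywhr2xyxyr(boxes_xywhr))
    # -> [[0.0, 2.0, 4.0, 4.0, 0.3]]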
@@ -91,6 +145,10 @@ def get_box_type(box_type):
         box_type (str): The type of box structure.
             The valid value are "LiDAR", "Camera", or "Depth".
 
+    Raises:
+        ValueError: A ValueError is raised when `box_type`
+            does not belong to the three valid types.
+
     Returns:
         tuple: Box type and box mode.
     """
@@ -113,21 +171,24 @@ def get_box_type(box_type):
     return box_type_3d, box_mode_3d
 
 
+@array_converter(apply_to=('points_3d', 'proj_mat'))
 def points_cam2img(points_3d, proj_mat, with_depth=False):
-    """Project points from camera coordicates to image coordinates.
+    """Project points in camera coordinates to image coordinates.
 
     Args:
-        points_3d (torch.Tensor): Points in shape (N, 3).
-        proj_mat (torch.Tensor): Transformation matrix between coordinates.
+        points_3d (torch.Tensor | np.ndarray): Points in shape (N, 3)
+        proj_mat (torch.Tensor | np.ndarray):
+            Transformation matrix between coordinates.
         with_depth (bool, optional): Whether to keep depth in the output.
             Defaults to False.
 
     Returns:
-        torch.Tensor: Points in image coordinates with shape [N, 2].
+        (torch.Tensor | np.ndarray): Points in image coordinates,
+            with shape [N, 2] if `with_depth=False`, else [N, 3].
     """
-    points_num = list(points_3d.shape)[:-1]
-
-    points_shape = np.concatenate([points_num, [1]], axis=0).tolist()
+    points_shape = list(points_3d.shape)
+    points_shape[-1] = 1
+
     assert len(proj_mat.shape) == 2, 'The dimension of the projection' \
         f' matrix should be 2 instead of {len(proj_mat.shape)}.'
     d1, d2 = proj_mat.shape[:2]

@@ -140,14 +201,15 @@ def points_cam2img(points_3d, proj_mat, with_depth=False):
         proj_mat_expanded[:d1, :d2] = proj_mat
         proj_mat = proj_mat_expanded
 
-    # previous implementation use new_zeros, new_one yeilds better results
-    points_4 = torch.cat(
-        [points_3d, points_3d.new_ones(*points_shape)], dim=-1)
-    point_2d = torch.matmul(points_4, proj_mat.t())
+    # previous implementation use new_zeros, new_one yields better results
+    points_4 = torch.cat([points_3d, points_3d.new_ones(points_shape)], dim=-1)
+
+    point_2d = points_4 @ proj_mat.T
     point_2d_res = point_2d[..., :2] / point_2d[..., 2:3]
 
     if with_depth:
-        return torch.cat([point_2d_res, point_2d[..., 2:3]], dim=-1)
+        point_2d_res = torch.cat([point_2d_res, point_2d[..., 2:3]], dim=-1)
 
     return point_2d_res
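A short sketch of the projection with a made-up pinhole intrinsic matrix; smaller (3x3 or 3x4) projection matrices are padded internally, and `with_depth=True` appends the depth as a third column:

    import numpy as np
    from mmdet3d.core.bbox.structures.utils import points_cam2img

    # Hypothetical 3x3 intrinsics (fx, fy, cx, cy).
    K = np.array([[700., 0., 320.],
                  [0., 700., 240.],
                  [0., 0., 1.]], dtype=np.float32)
    pts_cam = np.array([[1.0, 0.5, 10.0]], dtype=np.float32)  # (N, 3), z is depth

    print(points_cam2img(pts_cam, K))                   # [[390., 275.]]
    print(points_cam2img(pts_cam, K, with_depth=True))  # [[390., 275., 10.]]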
@@ -162,9 +224,9 @@ def mono_cam_box2vis(cam_box):
     After applying this function, we can project and draw it on 2D images.
 
     Args:
-        cam_box (:obj:`CameraInstance3DBoxes`): 3D bbox in camera coordinate \
-            system before conversion. Could be gt bbox loaded from dataset or \
-            network prediction output.
+        cam_box (:obj:`CameraInstance3DBoxes`): 3D bbox in camera coordinate
+            system before conversion. Could be gt bbox loaded from dataset
+            or network prediction output.
 
     Returns:
         :obj:`CameraInstance3DBoxes`: Box after conversion.
mmdet3d/core/bbox/transforms.py  (view file @ 2eebdc2d)

@@ -32,7 +32,7 @@ def bbox3d2roi(bbox_list):
             corresponding to a batch of images.
 
     Returns:
-        torch.Tensor: Region of interests in shape (n, c), where \
+        torch.Tensor: Region of interests in shape (n, c), where
             the channels are in order of [batch_ind, x, y ...].
     """
     rois_list = []

@@ -54,7 +54,7 @@ def bbox3d2result(bboxes, scores, labels, attrs=None):
         bboxes (torch.Tensor): Bounding boxes with shape of (n, 5).
         labels (torch.Tensor): Labels with shape of (n, ).
         scores (torch.Tensor): Scores with shape of (n, ).
-        attrs (torch.Tensor, optional): Attributes with shape of (n, ). \
+        attrs (torch.Tensor, optional): Attributes with shape of (n, ).
             Defaults to None.
 
     Returns:
mmdet3d/core/points/base_points.py  (view file @ 2eebdc2d)

@@ -4,6 +4,8 @@ import torch
 import warnings
 from abc import abstractmethod
 
+from ..bbox.structures.utils import rotation_3d_in_axis
+
 
 class BasePoints(object):
     """Base class for Points.

@@ -141,7 +143,7 @@ class BasePoints(object):
         """Rotate points with the given rotation matrix or angle.
 
         Args:
-            rotation (float, np.ndarray, torch.Tensor): Rotation matrix
+            rotation (float | np.ndarray | torch.Tensor): Rotation matrix
                 or angle.
             axis (int): Axis to rotate at. Defaults to None.
         """

@@ -154,28 +156,14 @@ class BasePoints(object):
             axis = self.rotation_axis
 
         if rotation.numel() == 1:
-            rot_sin = torch.sin(rotation)
-            rot_cos = torch.cos(rotation)
-            if axis == 1:
-                rot_mat_T = rotation.new_tensor([[rot_cos, 0, -rot_sin],
-                                                 [0, 1, 0],
-                                                 [rot_sin, 0, rot_cos]])
-            elif axis == 2 or axis == -1:
-                rot_mat_T = rotation.new_tensor([[rot_cos, -rot_sin, 0],
-                                                 [rot_sin, rot_cos, 0],
-                                                 [0, 0, 1]])
-            elif axis == 0:
-                rot_mat_T = rotation.new_tensor([[0, rot_cos, -rot_sin],
-                                                 [0, rot_sin, rot_cos],
-                                                 [1, 0, 0]])
-            else:
-                raise ValueError('axis should in range')
-            rot_mat_T = rot_mat_T.T
-        elif rotation.numel() == 9:
-            rot_mat_T = rotation
+            rotated_points, rot_mat_T = rotation_3d_in_axis(
+                self.tensor[:, :3][None], rotation, axis=axis, return_mat=True)
+            self.tensor[:, :3] = rotated_points.squeeze(0)
+            rot_mat_T = rot_mat_T.squeeze(0)
         else:
-            raise NotImplementedError
-        self.tensor[:, :3] = self.tensor[:, :3] @ rot_mat_T
+            # rotation.numel() == 9
+            self.tensor[:, :3] = self.tensor[:, :3] @ rotation
+            rot_mat_T = rotation
 
         return rot_mat_T
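After this change, `BasePoints.rotate` delegates the single-angle case to `rotation_3d_in_axis(..., return_mat=True)` and still returns the (transposed) rotation matrix it applied, which is what `GlobalRotScaleTrans` records below. A rough usage sketch with `LiDARPoints` (exact behavior of the surrounding class is outside this hunk; values arbitrary):

    import torch
    from mmdet3d.core.points import LiDARPoints

    points = LiDARPoints(torch.rand(100, 4), points_dim=4)  # x, y, z, intensity

    # Rotate by a single yaw angle around the default rotation axis;
    # the applied (transposed) 3x3 rotation matrix is returned.
    rot_mat_T = points.rotate(0.1)
    print(rot_mat_T.shape)  # expected: torch.Size([3, 3])

    # A full 3x3 rotation matrix is still accepted directly.
    points.rotate(torch.eye(3))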
mmdet3d/core/utils/__init__.py  (view file @ 2eebdc2d)

 # Copyright (c) OpenMMLab. All rights reserved.
+from .array_converter import ArrayConverter, array_converter
 from .gaussian import draw_heatmap_gaussian, gaussian_2d, gaussian_radius
 
-__all__ = ['gaussian_2d', 'gaussian_radius', 'draw_heatmap_gaussian']
+__all__ = [
+    'gaussian_2d', 'gaussian_radius', 'draw_heatmap_gaussian',
+    'ArrayConverter', 'array_converter'
+]
mmdet3d/core/utils/array_converter.py  (new file, 0 → 100644, view file @ 2eebdc2d)

import functools
import numpy as np
import torch
from inspect import getfullargspec


def array_converter(to_torch=True,
                    apply_to=tuple(),
                    template_arg_name_=None,
                    recover=True):
    """Wrapper function for data-type agnostic processing.

    First converts input arrays to PyTorch tensors or NumPy ndarrays
    for middle calculation, then convert output to original data-type.

    Args:
        to_torch (Bool): Whether convert to PyTorch tensors
            for middle calculation. Defaults to True.
        apply_to (tuple[str]): The arguments to which we apply data-type
            conversion. Defaults to an empty tuple.
        template_arg_name_ (str): Argument serving as the template (
            return arrays should have the same dtype and device
            as the template). Defaults to None. If None, we will use the
            first argument in `apply_to` as the template argument.
        recover (Bool): Whether or not recover the wrapped function outputs
            to the `template_arg_name_` type. Defaults to True.

    Raises:
        ValueError: When template_arg_name_ is not among all args, or
            when apply_to contains an arg which is not among all args,
            a ValueError will be raised. When the template argument or
            an argument to convert is a list or tuple, and cannot be
            converted to a NumPy array, a ValueError will be raised.
        TypeError: When the type of the template argument or
            an argument to convert does not belong to the above range,
            or the contents of such an list-or-tuple-type argument
            do not share the same data type, a TypeError is raised.

    Returns:
        (function): wrapped function.

    Example:
        >>> import torch
        >>> import numpy as np
        >>>
        >>> # Use torch addition for a + b,
        >>> # and convert return values to the type of a
        >>> @array_converter(apply_to=('a', 'b'))
        >>> def simple_add(a, b):
        >>>     return a + b
        >>>
        >>> a = np.array([1.1])
        >>> b = np.array([2.2])
        >>> simple_add(a, b)
        >>>
        >>> # Use numpy addition for a + b,
        >>> # and convert return values to the type of b
        >>> @array_converter(to_torch=False, apply_to=('a', 'b'),
        >>>                  template_arg_name_='b')
        >>> def simple_add(a, b):
        >>>     return a + b
        >>>
        >>> simple_add()
        >>>
        >>> # Use torch funcs for floor(a) if flag=True else ceil(a),
        >>> # and return the torch tensor
        >>> @array_converter(apply_to=('a',), recover=False)
        >>> def floor_or_ceil(a, flag=True):
        >>>     return torch.floor(a) if flag else torch.ceil(a)
        >>>
        >>> floor_or_ceil(a, flag=False)
    """

    def array_converter_wrapper(func):
        """Outer wrapper for the function."""

        @functools.wraps(func)
        def new_func(*args, **kwargs):
            """Inner wrapper for the arguments."""
            if len(apply_to) == 0:
                return func(*args, **kwargs)

            func_name = func.__name__

            arg_spec = getfullargspec(func)

            arg_names = arg_spec.args
            arg_num = len(arg_names)
            default_arg_values = arg_spec.defaults
            if default_arg_values is None:
                default_arg_values = []
            no_default_arg_num = len(arg_names) - len(default_arg_values)

            kwonly_arg_names = arg_spec.kwonlyargs
            kwonly_default_arg_values = arg_spec.kwonlydefaults
            if kwonly_default_arg_values is None:
                kwonly_default_arg_values = {}

            all_arg_names = arg_names + kwonly_arg_names

            # in case there are args in the form of *args
            if len(args) > arg_num:
                named_args = args[:arg_num]
                nameless_args = args[arg_num:]
            else:
                named_args = args
                nameless_args = []

            # template argument data type is used for all array-like arguments
            if template_arg_name_ is None:
                template_arg_name = apply_to[0]
            else:
                template_arg_name = template_arg_name_

            if template_arg_name not in all_arg_names:
                raise ValueError(f'{template_arg_name} is not among the '
                                 f'argument list of function {func_name}')

            # inspect apply_to
            for arg_to_apply in apply_to:
                if arg_to_apply not in all_arg_names:
                    raise ValueError(f'{arg_to_apply} is not '
                                     f'an argument of {func_name}')

            new_args = []
            new_kwargs = {}

            converter = ArrayConverter()
            target_type = torch.Tensor if to_torch else np.ndarray

            # non-keyword arguments
            for i, arg_value in enumerate(named_args):
                if arg_names[i] in apply_to:
                    new_args.append(
                        converter.convert(
                            input_array=arg_value, target_type=target_type))
                else:
                    new_args.append(arg_value)

                if arg_names[i] == template_arg_name:
                    template_arg_value = arg_value

            kwonly_default_arg_values.update(kwargs)
            kwargs = kwonly_default_arg_values

            # keyword arguments and non-keyword arguments using default value
            for i in range(len(named_args), len(all_arg_names)):
                arg_name = all_arg_names[i]
                if arg_name in kwargs:
                    if arg_name in apply_to:
                        new_kwargs[arg_name] = converter.convert(
                            input_array=kwargs[arg_name],
                            target_type=target_type)
                    else:
                        new_kwargs[arg_name] = kwargs[arg_name]
                else:
                    default_value = default_arg_values[i - no_default_arg_num]
                    if arg_name in apply_to:
                        new_kwargs[arg_name] = converter.convert(
                            input_array=default_value,
                            target_type=target_type)
                    else:
                        new_kwargs[arg_name] = default_value
                if arg_name == template_arg_name:
                    template_arg_value = kwargs[arg_name]

            # add nameless args provided by *args (if exists)
            new_args += nameless_args

            return_values = func(*new_args, **new_kwargs)
            converter.set_template(template_arg_value)

            def recursive_recover(input_data):
                if isinstance(input_data, (tuple, list)):
                    new_data = []
                    for item in input_data:
                        new_data.append(recursive_recover(item))
                    return tuple(new_data) if isinstance(input_data,
                                                         tuple) else new_data
                elif isinstance(input_data, dict):
                    new_data = {}
                    for k, v in input_data.items():
                        new_data[k] = recursive_recover(v)
                    return new_data
                elif isinstance(input_data, (torch.Tensor, np.ndarray)):
                    return converter.recover(input_data)
                else:
                    return input_data

            if recover:
                return recursive_recover(return_values)
            else:
                return return_values

        return new_func

    return array_converter_wrapper


class ArrayConverter:

    SUPPORTED_NON_ARRAY_TYPES = (int, float, np.int8, np.int16, np.int32,
                                 np.int64, np.uint8, np.uint16, np.uint32,
                                 np.uint64, np.float16, np.float32, np.float64)

    def __init__(self, template_array=None):
        if template_array is not None:
            self.set_template(template_array)

    def set_template(self, array):
        """Set template array.

        Args:
            array (tuple | list | int | float | np.ndarray | torch.Tensor):
                Template array.

        Raises:
            ValueError: If input is list or tuple and cannot be converted to
                to a NumPy array, a ValueError is raised.
            TypeError: If input type does not belong to the above range,
                or the contents of a list or tuple do not share the
                same data type, a TypeError is raised.
        """
        self.array_type = type(array)
        self.is_num = False
        self.device = 'cpu'

        if isinstance(array, np.ndarray):
            self.dtype = array.dtype
        elif isinstance(array, torch.Tensor):
            self.dtype = array.dtype
            self.device = array.device
        elif isinstance(array, (list, tuple)):
            try:
                array = np.array(array)
                if array.dtype not in self.SUPPORTED_NON_ARRAY_TYPES:
                    raise TypeError
                self.dtype = array.dtype
            except (ValueError, TypeError):
                print(f'The following list cannot be converted to'
                      f' a numpy array of supported dtype:\n{array}')
                raise
        elif isinstance(array, self.SUPPORTED_NON_ARRAY_TYPES):
            self.array_type = np.ndarray
            self.is_num = True
            self.dtype = np.dtype(type(array))
        else:
            raise TypeError(f'Template type {self.array_type}'
                            f' is not supported.')

    def convert(self, input_array, target_type=None, target_array=None):
        """Convert input array to target data type.

        Args:
            input_array (tuple | list | np.ndarray |
                torch.Tensor | int | float ):
                Input array. Defaults to None.
            target_type (<class 'np.ndarray'> | <class 'torch.Tensor'>):
                Type to which input array is converted. Defaults to None.
            target_array (np.ndarray | torch.Tensor):
                Template array to which input array is converted.
                Defaults to None.

        Raises:
            ValueError: If input is list or tuple and cannot be converted to
                to a NumPy array, a ValueError is raised.
            TypeError: If input type does not belong to the above range,
                or the contents of a list or tuple do not share the
                same data type, a TypeError is raised.
        """
        if isinstance(input_array, (list, tuple)):
            try:
                input_array = np.array(input_array)
                if input_array.dtype not in self.SUPPORTED_NON_ARRAY_TYPES:
                    raise TypeError
            except (ValueError, TypeError):
                print(f'The input cannot be converted to'
                      f' a single-type numpy array:\n{input_array}')
                raise
        elif isinstance(input_array, self.SUPPORTED_NON_ARRAY_TYPES):
            input_array = np.array(input_array)
        array_type = type(input_array)
        assert target_type is not None or target_array is not None, \
            'must specify a target'
        if target_type is not None:
            assert target_type in (np.ndarray, torch.Tensor), \
                'invalid target type'
            if target_type == array_type:
                return input_array
            elif target_type == np.ndarray:
                # default dtype is float32
                converted_array = input_array.cpu().numpy().astype(np.float32)
            else:
                # default dtype is float32, device is 'cpu'
                converted_array = torch.tensor(
                    input_array, dtype=torch.float32)
        else:
            assert isinstance(target_array, (np.ndarray, torch.Tensor)), \
                'invalid target array type'
            if isinstance(target_array, array_type):
                return input_array
            elif isinstance(target_array, np.ndarray):
                converted_array = input_array.cpu().numpy().astype(
                    target_array.dtype)
            else:
                converted_array = target_array.new_tensor(input_array)
        return converted_array

    def recover(self, input_array):
        assert isinstance(input_array, (np.ndarray, torch.Tensor)), \
            'invalid input array type'
        if isinstance(input_array, self.array_type):
            return input_array
        elif isinstance(input_array, torch.Tensor):
            converted_array = input_array.cpu().numpy().astype(self.dtype)
        else:
            converted_array = torch.tensor(
                input_array, dtype=self.dtype, device=self.device)
        if self.is_num:
            converted_array = converted_array.item()
        return converted_array
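To illustrate the intended contract of the new module: the decorator converts the listed array-like arguments to tensors (or ndarrays with `to_torch=False`) for the wrapped computation, then restores the template argument's type, dtype and device on the outputs. A small sketch with a hypothetical helper (`scale_and_shift` is not part of the commit):

    import numpy as np
    import torch
    from mmdet3d.core.utils import ArrayConverter, array_converter


    @array_converter(apply_to=('pts', 'offset'))
    def scale_and_shift(pts, offset, scale=2.0):
        # The body can be written purely against the torch API.
        return pts * scale + offset


    pts = np.array([[1.0, 2.0]], dtype=np.float32)
    out = scale_and_shift(pts, offset=np.float32(0.5))
    print(type(out), out.dtype)  # <class 'numpy.ndarray'> float32

    # The ArrayConverter class can also be used directly.
    conv = ArrayConverter(template_array=pts)
    t = conv.convert(input_array=[3.0, 4.0], target_type=torch.Tensor)
    print(conv.recover(t))  # back to a float32 ndarray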
mmdet3d/core/visualizer/show_result.py  (view file @ 2eebdc2d)

@@ -111,16 +111,14 @@ def show_result(points,
     if gt_bboxes is not None:
         # bottom center to gravity center
         gt_bboxes[..., 2] += gt_bboxes[..., 5] / 2
-        # the positive direction for yaw in meshlab is clockwise
-        gt_bboxes[:, 6] *= -1
+
         _write_oriented_bbox(gt_bboxes,
                              osp.join(result_path, f'{filename}_gt.obj'))
 
     if pred_bboxes is not None:
         # bottom center to gravity center
         pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
-        # the positive direction for yaw in meshlab is clockwise
-        pred_bboxes[:, 6] *= -1
+
         _write_oriented_bbox(pred_bboxes,
                              osp.join(result_path, f'{filename}_pred.obj'))
mmdet3d/datasets/kitti_dataset.py  (view file @ 2eebdc2d)

@@ -617,8 +617,6 @@ class KittiDataset(Custom3DDataset):
         scores = box_dict['scores_3d']
         labels = box_dict['labels_3d']
         sample_idx = info['image']['image_idx']
-        # TODO: remove the hack of yaw
-        box_preds.tensor[:, -1] = box_preds.tensor[:, -1] - np.pi
         box_preds.limit_yaw(offset=0.5, period=np.pi * 2)
 
         if len(box_preds) == 0:
mmdet3d/datasets/lyft_dataset.py  (view file @ 2eebdc2d)

@@ -517,16 +517,16 @@ def output_to_lyft_box(detection):
     box_gravity_center = box3d.gravity_center.numpy()
     box_dims = box3d.dims.numpy()
     box_yaw = box3d.yaw.numpy()
-    # TODO: check whether this is necessary
-    # with dir_offset & dir_limit in the head
-    box_yaw = -box_yaw - np.pi / 2
+
+    # our LiDAR coordinate system -> Lyft box coordinate system
+    lyft_box_dims = box_dims[:, [1, 0, 2]]
 
     box_list = []
     for i in range(len(box3d)):
         quat = Quaternion(axis=[0, 0, 1], radians=box_yaw[i])
         box = LyftBox(
             box_gravity_center[i],
-            box_dims[i],
+            lyft_box_dims[i],
             quat,
             label=labels[i],
             score=scores[i])
mmdet3d/datasets/nuscenes_dataset.py  (view file @ 2eebdc2d)

@@ -588,9 +588,9 @@ def output_to_nusc_box(detection):
     box_gravity_center = box3d.gravity_center.numpy()
     box_dims = box3d.dims.numpy()
     box_yaw = box3d.yaw.numpy()
-    # TODO: check whether this is necessary
-    # with dir_offset & dir_limit in the head
-    box_yaw = -box_yaw - np.pi / 2
+
+    # our LiDAR coordinate system -> nuScenes box coordinate system
+    nus_box_dims = box_dims[:, [1, 0, 2]]
 
     box_list = []
     for i in range(len(box3d)):

@@ -602,7 +602,7 @@ def output_to_nusc_box(detection):
         #  velo_val * np.cos(velo_ori), velo_val * np.sin(velo_ori), 0.0)
         box = NuScenesBox(
             box_gravity_center[i],
-            box_dims[i],
+            nus_box_dims[i],
             quat,
             label=labels[i],
             score=scores[i],
mmdet3d/datasets/pipelines/data_augment_utils.py  (view file @ 2eebdc2d)

@@ -21,8 +21,8 @@ def _rotation_box2d_jit_(corners, angle, rot_mat_T):
     rot_sin = np.sin(angle)
     rot_cos = np.cos(angle)
     rot_mat_T[0, 0] = rot_cos
-    rot_mat_T[0, 1] = -rot_sin
-    rot_mat_T[1, 0] = rot_sin
+    rot_mat_T[0, 1] = rot_sin
+    rot_mat_T[1, 0] = -rot_sin
     rot_mat_T[1, 1] = rot_cos
     corners[:] = corners @ rot_mat_T

@@ -211,8 +211,8 @@ def noise_per_box_v2_(boxes, valid_mask, loc_noises, rot_noises,
                 rot_sin = np.sin(current_box[0, -1])
                 rot_cos = np.cos(current_box[0, -1])
                 rot_mat_T[0, 0] = rot_cos
-                rot_mat_T[0, 1] = -rot_sin
-                rot_mat_T[1, 0] = rot_sin
+                rot_mat_T[0, 1] = rot_sin
+                rot_mat_T[1, 0] = -rot_sin
                 rot_mat_T[1, 1] = rot_cos
                 current_corners[:] = current_box[
                     0, 2:4] * corners_norm @ rot_mat_T + current_box[0, :2]

@@ -264,18 +264,18 @@ def _rotation_matrix_3d_(rot_mat_T, angle, axis):
     rot_mat_T[:] = np.eye(3)
     if axis == 1:
         rot_mat_T[0, 0] = rot_cos
-        rot_mat_T[0, 2] = -rot_sin
-        rot_mat_T[2, 0] = rot_sin
+        rot_mat_T[0, 2] = rot_sin
+        rot_mat_T[2, 0] = -rot_sin
         rot_mat_T[2, 2] = rot_cos
     elif axis == 2 or axis == -1:
         rot_mat_T[0, 0] = rot_cos
-        rot_mat_T[0, 1] = -rot_sin
-        rot_mat_T[1, 0] = rot_sin
+        rot_mat_T[0, 1] = rot_sin
+        rot_mat_T[1, 0] = -rot_sin
        rot_mat_T[1, 1] = rot_cos
     elif axis == 0:
         rot_mat_T[1, 1] = rot_cos
-        rot_mat_T[1, 2] = -rot_sin
-        rot_mat_T[2, 1] = rot_sin
+        rot_mat_T[1, 2] = rot_sin
+        rot_mat_T[2, 1] = -rot_sin
         rot_mat_T[2, 2] = rot_cos
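The sign swaps above turn `rot_mat_T` into the transpose of the rotation matrix, so `corners @ rot_mat_T` rotates counter-clockwise for a positive angle (in a standard x-right, y-up frame), matching the refactored `rotation_3d_in_axis`. A quick NumPy check of that convention (angle chosen arbitrarily):

    import numpy as np

    angle = np.pi / 2
    rot_sin, rot_cos = np.sin(angle), np.cos(angle)

    # New convention: rot_mat_T is the transposed 2D rotation matrix.
    rot_mat_T = np.array([[rot_cos, rot_sin],
                          [-rot_sin, rot_cos]])

    corner = np.array([[1.0, 0.0]])
    print(np.round(corner @ rot_mat_T, 3))  # [[0. 1.]] -> counter-clockwise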
mmdet3d/datasets/pipelines/formating.py  (view file @ 2eebdc2d)

@@ -137,7 +137,8 @@ class Collect3D(object):
                             'scale_factor', 'flip', 'pcd_horizontal_flip',
                             'pcd_vertical_flip', 'box_mode_3d', 'box_type_3d',
                             'img_norm_cfg', 'pcd_trans', 'sample_idx',
-                            'pcd_scale_factor', 'pcd_rotation', 'pts_filename',
+                            'pcd_scale_factor', 'pcd_rotation',
+                            'pcd_rotation_angle', 'pts_filename',
                             'transformation_3d_flow')):
         self.keys = keys
         self.meta_keys = meta_keys
mmdet3d/datasets/pipelines/transforms_3d.py  (view file @ 2eebdc2d)

@@ -394,7 +394,7 @@ class ObjectNoise(object):
         gt_bboxes_3d = input_dict['gt_bboxes_3d']
         points = input_dict['points']
 
-        # TODO: check this inplace function
+        # TODO: this is inplace operation
         numpy_box = gt_bboxes_3d.tensor.numpy()
         numpy_points = points.tensor.numpy()

@@ -589,6 +589,7 @@ class GlobalRotScaleTrans(object):
         if len(input_dict['bbox3d_fields']) == 0:
             rot_mat_T = input_dict['points'].rotate(noise_rotation)
             input_dict['pcd_rotation'] = rot_mat_T
+            input_dict['pcd_rotation_angle'] = noise_rotation
             return
 
         # rotate points with bboxes

@@ -598,6 +599,7 @@ class GlobalRotScaleTrans(object):
                     noise_rotation, input_dict['points'])
                 input_dict['points'] = points
                 input_dict['pcd_rotation'] = rot_mat_T
+                input_dict['pcd_rotation_angle'] = noise_rotation
 
     def _scale_bbox_points(self, input_dict):
         """Private function to scale bounding boxes and points.
mmdet3d/datasets/waymo_dataset.py  (view file @ 2eebdc2d)

@@ -494,7 +494,6 @@ class WaymoDataset(KittiDataset):
         scores = box_dict['scores_3d']
         labels = box_dict['labels_3d']
         sample_idx = info['image']['image_idx']
-        # TODO: remove the hack of yaw
         box_preds.limit_yaw(offset=0.5, period=np.pi * 2)
 
         if len(box_preds) == 0:
mmdet3d/models/dense_heads/anchor3d_head.py  (view file @ 2eebdc2d)

@@ -51,15 +51,15 @@ class Anchor3DHead(BaseModule, AnchorTrainMixin):
                     type='Anchor3DRangeGenerator',
                     range=[0, -39.68, -1.78, 69.12, 39.68, -1.78],
                     strides=[2],
-                    sizes=[[1.6, 3.9, 1.56]],
+                    sizes=[[3.9, 1.6, 1.56]],
                     rotations=[0, 1.57],
                     custom_values=[],
                     reshape_out=False),
                 assigner_per_size=False,
                 assign_per_class=False,
                 diff_rad_by_sin=True,
-                dir_offset=0,
-                dir_limit_offset=1,
+                dir_offset=-np.pi / 2,
+                dir_limit_offset=0,
                 bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
                 loss_cls=dict(
                     type='CrossEntropyLoss',
mmdet3d/models/dense_heads/anchor_free_mono3d_head.py  (view file @ 2eebdc2d)

@@ -79,6 +79,7 @@ class AnchorFreeMono3DHead(BaseMono3DDenseHead):
                 use_direction_classifier=True,
                 diff_rad_by_sin=True,
                 dir_offset=0,
+                dir_limit_offset=0,
                 loss_cls=dict(
                     type='FocalLoss',
                     use_sigmoid=True,
mmdet3d/models/dense_heads/fcos_mono3d_head.py  (view file @ 2eebdc2d)

@@ -217,6 +217,7 @@ class FCOSMono3DHead(AnchorFreeMono3DHead):
     @staticmethod
     def get_direction_target(reg_targets,
                              dir_offset=0,
+                             dir_limit_offset=0,
                              num_bins=2,
                              one_hot=True):
         """Encode direction to 0 ~ num_bins-1.

@@ -231,7 +232,8 @@ class FCOSMono3DHead(AnchorFreeMono3DHead):
             torch.Tensor: Encoded direction targets.
         """
         rot_gt = reg_targets[..., 6]
-        offset_rot = limit_period(rot_gt - dir_offset, 0, 2 * np.pi)
+        offset_rot = limit_period(rot_gt - dir_offset, dir_limit_offset,
+                                  2 * np.pi)
         dir_cls_targets = torch.floor(offset_rot /
                                       (2 * np.pi / num_bins)).long()
         dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1)

@@ -377,7 +379,10 @@ class FCOSMono3DHead(AnchorFreeMono3DHead):
         if self.use_direction_classifier:
             pos_dir_cls_targets = self.get_direction_target(
-                pos_bbox_targets_3d, self.dir_offset, one_hot=False)
+                pos_bbox_targets_3d,
+                self.dir_offset,
+                self.dir_limit_offset,
+                one_hot=False)
 
         if self.diff_rad_by_sin:
             pos_bbox_preds, pos_bbox_targets_3d = self.add_sin_difference(
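With `dir_limit_offset` now threaded through, the direction target is simply the bin index of `limit_period(yaw - dir_offset, dir_limit_offset, 2 * pi)`. A standalone sketch of the binning logic for `num_bins=2` (arbitrary angles; this mirrors, rather than calls, `get_direction_target`):

    import numpy as np
    import torch

    def limit_period(val, offset=0.5, period=np.pi):
        # Same formula as the refactored utility above.
        return val - torch.floor(val / period + offset) * period

    rot_gt = torch.tensor([0.3, 2.0, -1.2, 3.0])
    dir_offset, dir_limit_offset, num_bins = 0.0, 0.0, 2

    offset_rot = limit_period(rot_gt - dir_offset, dir_limit_offset, 2 * np.pi)
    dir_cls_targets = torch.floor(offset_rot / (2 * np.pi / num_bins)).long()
    dir_cls_targets = torch.clamp(dir_cls_targets, min=0, max=num_bins - 1)
    print(dir_cls_targets)  # tensor([0, 0, 1, 0])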
mmdet3d/models/dense_heads/free_anchor3d_head.py  (view file @ 2eebdc2d)

@@ -195,6 +195,7 @@ class FreeAnchor3DHead(Anchor3DHead):
                 matched_anchors,
                 matched_object_targets,
                 self.dir_offset,
+                self.dir_limit_offset,
                 one_hot=False)
             loss_dir = self.loss_dir(
                 dir_cls_preds_[matched].transpose(-2, -1),
mmdet3d/models/dense_heads/groupfree3d_head.py  (view file @ 2eebdc2d)

@@ -951,7 +951,7 @@ class GroupFree3DHead(BaseModule):
                 box_dim=bbox.shape[-1],
                 with_yaw=self.bbox_coder.with_rot,
                 origin=(0.5, 0.5, 0.5))
 
-            box_indices = bbox.points_in_boxes(points)
+            box_indices = bbox.points_in_boxes_batch(points)
             corner3d = bbox.corners
             minmax_box3d = corner3d.new(torch.Size((corner3d.shape[0], 6)))
mmdet3d/models/dense_heads/parta2_rpn_head.py  (view file @ 2eebdc2d)

@@ -60,15 +60,15 @@ class PartA2RPNHead(Anchor3DHead):
                     type='Anchor3DRangeGenerator',
                     range=[0, -39.68, -1.78, 69.12, 39.68, -1.78],
                     strides=[2],
-                    sizes=[[1.6, 3.9, 1.56]],
+                    sizes=[[3.9, 1.6, 1.56]],
                     rotations=[0, 1.57],
                     custom_values=[],
                     reshape_out=False),
                 assigner_per_size=False,
                 assign_per_class=False,
                 diff_rad_by_sin=True,
-                dir_offset=0,
-                dir_limit_offset=1,
+                dir_offset=-np.pi / 2,
+                dir_limit_offset=0,
                 bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
                 loss_cls=dict(
                     type='CrossEntropyLoss',
mmdet3d/models/dense_heads/ssd_3d_head.py  (view file @ 2eebdc2d)

@@ -464,9 +464,7 @@ class SSD3DHead(VoteHead):
             bbox_selected, score_selected, labels = self.multiclass_nms_single(
                 obj_scores[b], sem_scores[b], bbox3d[b], points[b, ..., :3],
                 input_metas[b])
-            # fix the wrong direction
-            # To do: remove this ops
-            bbox_selected[..., 6] += np.pi
+
             bbox = input_metas[b]['box_type_3d'](
                 bbox_selected.clone(),
                 box_dim=bbox_selected.shape[-1],

@@ -489,23 +487,14 @@ class SSD3DHead(VoteHead):
         Returns:
             tuple[torch.Tensor]: Bounding boxes, scores and labels.
         """
-        num_bbox = bbox.shape[0]
         bbox = input_meta['box_type_3d'](
             bbox.clone(),
             box_dim=bbox.shape[-1],
             with_yaw=self.bbox_coder.with_rot,
             origin=(0.5, 0.5, 0.5))
 
-        if isinstance(bbox, LiDARInstance3DBoxes):
-            box_idx = bbox.points_in_boxes(points)
-            box_indices = box_idx.new_zeros([num_bbox + 1])
-            box_idx[box_idx == -1] = num_bbox
-            box_indices.scatter_add_(0, box_idx.long(),
-                                     box_idx.new_ones(box_idx.shape))
-            box_indices = box_indices[:-1]
-            nonempty_box_mask = box_indices >= 0
-        elif isinstance(bbox, DepthInstance3DBoxes):
-            box_indices = bbox.points_in_boxes(points)
+        if isinstance(bbox, (LiDARInstance3DBoxes, DepthInstance3DBoxes)):
+            box_indices = bbox.points_in_boxes_batch(points)
             nonempty_box_mask = box_indices.T.sum(1) >= 0
         else:
             raise NotImplementedError('Unsupported bbox type!')

@@ -560,18 +549,8 @@ class SSD3DHead(VoteHead):
             tuple[torch.Tensor]: Flags indicating whether each point is
                 inside bbox and the index of box where each point are in.
         """
-        # TODO: align points_in_boxes function in each box_structures
-        num_bbox = bboxes_3d.tensor.shape[0]
-        if isinstance(bboxes_3d, LiDARInstance3DBoxes):
-            assignment = bboxes_3d.points_in_boxes(points).long()
-            points_mask = assignment.new_zeros(
-                [assignment.shape[0], num_bbox + 1])
-            assignment[assignment == -1] = num_bbox
-            points_mask.scatter_(1, assignment.unsqueeze(1), 1)
-            points_mask = points_mask[:, :-1]
-            assignment[assignment == num_bbox] = num_bbox - 1
-        elif isinstance(bboxes_3d, DepthInstance3DBoxes):
-            points_mask = bboxes_3d.points_in_boxes(points)
+        if isinstance(bboxes_3d, (LiDARInstance3DBoxes, DepthInstance3DBoxes)):
+            points_mask = bboxes_3d.points_in_boxes_batch(points)
             assignment = points_mask.argmax(dim=-1)
         else:
             raise NotImplementedError('Unsupported bbox type!')
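Both hunks above now consume a per-box membership mask from `points_in_boxes_batch` rather than a per-point box index. A generic sketch of deriving the non-empty-box mask and the per-point assignment from such a mask (the mask here is fabricated, not produced by the real API):

    import torch

    # Fabricated membership mask: mask[i, j] == 1 if point i falls in box j.
    points_mask = torch.tensor([[1, 0, 0],
                                [0, 0, 1],
                                [0, 0, 0]])

    nonempty_box_mask = points_mask.T.sum(1) >= 0  # as in multiclass_nms_single
    assignment = points_mask.argmax(dim=-1)        # as in the assignment helper
    print(nonempty_box_mask)  # tensor([True, True, True])
    print(assignment)         # tensor([0, 2, 0])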