OpenDAS / mmdetection3d · Commits · 96cab60d

Unverified commit 96cab60d, authored Sep 21, 2020 by Yuefeng Wu, committed by GitHub on Sep 21, 2020.

[Fix]: fix multi-batch show in detectors (#120)

parent 1cd50481
Changes: 2 changed files, with 83 additions and 70 deletions.

    mmdet3d/models/detectors/base.py            +42  -36
    mmdet3d/models/detectors/mvx_two_stage.py   +41  -34
mmdet3d/models/detectors/base.py (view file @ 96cab60d)
...
@@ -48,8 +48,8 @@ class Base3DDetector(BaseDetector):
         Note this setting will change the expected inputs. When
         `return_loss=True`, img and img_metas are single-nested (i.e.
-        torch.Tensor and list[dict]), and when `resturn_loss=False`, img and
-        img_metas should be double nested (i.e. list[torch.Tensor],
+        torch.Tensor and list[dict]), and when `resturn_loss=False`, img and
+        img_metas should be double nested (i.e. list[torch.Tensor],
         list[list[dict]]), with the outer list indicating test time
         augmentations.
         """
...
@@ -62,42 +62,48 @@ class Base3DDetector(BaseDetector):
         """Results visualization.

         Args:
-            data (dict): Input points and the information of the sample.
-            result (dict): Prediction results.
+            data (list[dict]): Input points and the information of the sample.
+            result (list[dict]): Prediction results.
             out_dir (str): Output directory of visualization result.
         """
-        if isinstance(data['points'][0], DC):
-            points = data['points'][0]._data[0][0].numpy()
-        elif mmcv.is_list_of(data['points'][0], torch.Tensor):
-            points = data['points'][0][0]
-        else:
-            ValueError(f"Unsupported data type {type(data['points'][0])} "
-                       f'for visualization!')
-        if isinstance(data['img_metas'][0], DC):
-            pts_filename = data['img_metas'][0]._data[0][0]['pts_filename']
-            box_mode_3d = data['img_metas'][0]._data[0][0]['box_mode_3d']
-        elif mmcv.is_list_of(data['img_metas'][0], dict):
-            pts_filename = data['img_metas'][0][0]['pts_filename']
-            box_mode_3d = data['img_metas'][0][0]['box_mode_3d']
-        else:
-            ValueError(f"Unsupported data type {type(data['img_metas'][0])} "
-                       f'for visualization!')
-        file_name = osp.split(pts_filename)[-1].split('.')[0]
+        for batch_id in range(len(result)):
+            if isinstance(data['points'][0], DC):
+                points = data['points'][0]._data[0][batch_id].numpy()
+            elif mmcv.is_list_of(data['points'][0], torch.Tensor):
+                points = data['points'][0][batch_id]
+            else:
+                ValueError(f"Unsupported data type {type(data['points'][0])} "
+                           f'for visualization!')
+            if isinstance(data['img_metas'][0], DC):
+                pts_filename = data['img_metas'][0]._data[0][batch_id][
+                    'pts_filename']
+                box_mode_3d = data['img_metas'][0]._data[0][batch_id][
+                    'box_mode_3d']
+            elif mmcv.is_list_of(data['img_metas'][0], dict):
+                pts_filename = data['img_metas'][0][batch_id]['pts_filename']
+                box_mode_3d = data['img_metas'][0][batch_id]['box_mode_3d']
+            else:
+                ValueError(f"Unsupported data type {type(data['img_metas'][0])} "
+                           f'for visualization!')
+            file_name = osp.split(pts_filename)[-1].split('.')[0]

-        assert out_dir is not None, 'Expect out_dir, got none.'
+            assert out_dir is not None, 'Expect out_dir, got none.'

-        pred_bboxes = copy.deepcopy(result['boxes_3d'].tensor.numpy())
-        # for now we convert points into depth mode
-        if box_mode_3d == Box3DMode.DEPTH:
-            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
-        elif box_mode_3d == Box3DMode.CAM or box_mode_3d == Box3DMode.LIDAR:
-            points = points[..., [1, 0, 2]]
-            points[..., 0] *= -1
-            pred_bboxes = Box3DMode.convert(pred_bboxes, box_mode_3d,
-                                            Box3DMode.DEPTH)
-            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
-        else:
-            ValueError(
-                f'Unsupported box_mode_3d {box_mode_3d} for convertion!')
-        show_result(points, None, pred_bboxes, out_dir, file_name)
+            pred_bboxes = copy.deepcopy(
+                result[batch_id]['boxes_3d'].tensor.numpy())
+            # for now we convert points into depth mode
+            if box_mode_3d == Box3DMode.DEPTH:
+                pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
+            elif (box_mode_3d == Box3DMode.CAM) or (box_mode_3d == Box3DMode.LIDAR):
+                points = points[..., [1, 0, 2]]
+                points[..., 0] *= -1
+                pred_bboxes = Box3DMode.convert(pred_bboxes, box_mode_3d,
+                                                Box3DMode.DEPTH)
+                pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
+            else:
+                ValueError(
+                    f'Unsupported box_mode_3d {box_mode_3d} for convertion!')
+            show_result(points, None, pred_bboxes, out_dir, file_name)
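Taken together, the base.py change means a caller of `show_results` now passes `result` as a list with one prediction dict per sample, and each sample's points and metadata are picked out by `batch_id` instead of a hard-coded index 0. A rough sketch of the data layout the new loop expects, with made-up point counts and placeholder metadata; the call to `model.show_results` is left as a comment because it needs a constructed detector:

import torch
from mmcv.parallel import DataContainer as DC

# A pretend test batch of two samples, shaped like a mmdet3d dataloader output.
points = DC([[torch.rand(1000, 4), torch.rand(800, 4)]])
img_metas = DC([[dict(pts_filename='a.bin', box_mode_3d=None),
                 dict(pts_filename='b.bin', box_mode_3d=None)]])
data = dict(points=[points], img_metas=[img_metas])

# One prediction dict per sample, as the fixed loop assumes.
result = [dict(boxes_3d=None), dict(boxes_3d=None)]

# model.show_results(data, result, out_dir='./vis')  # hypothetical call
# The per-batch indexing the new loop performs, shown standalone:
for batch_id in range(len(result)):
    pts = data['points'][0]._data[0][batch_id]       # per-sample point cloud
    meta = data['img_metas'][0]._data[0][batch_id]   # per-sample metadata
    print(batch_id, tuple(pts.shape), meta['pts_filename'])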
mmdet3d/models/detectors/mvx_two_stage.py (view file @ 96cab60d)
+import copy
 import mmcv
 import torch
 from mmcv.parallel import DataContainer as DC
...
@@ -454,37 +455,43 @@ class MVXTwoStageDetector(Base3DDetector):
             result (dict): Prediction results.
             out_dir (str): Output directory of visualization result.
         """
-        if isinstance(data['points'][0], DC):
-            points = data['points'][0]._data[0][0].numpy()
-        elif mmcv.is_list_of(data['points'][0], torch.Tensor):
-            points = data['points'][0][0]
-        else:
-            ValueError(f"Unsupported data type {type(data['points'][0])} "
-                       f'for visualization!')
-        if isinstance(data['img_metas'][0], DC):
-            pts_filename = data['img_metas'][0]._data[0][0]['pts_filename']
-            box_mode_3d = data['img_metas'][0]._data[0][0]['box_mode_3d']
-        elif mmcv.is_list_of(data['img_metas'][0], dict):
-            pts_filename = data['img_metas'][0][0]['pts_filename']
-            box_mode_3d = data['img_metas'][0][0]['box_mode_3d']
-        else:
-            ValueError(f"Unsupported data type {type(data['img_metas'][0])} "
-                       f'for visualization!')
-        file_name = osp.split(pts_filename)[-1].split('.')[0]
-        assert out_dir is not None, 'Expect out_dir, got none.'
-        inds = result['pts_bbox']['scores_3d'] > 0.1
-        pred_bboxes = result['pts_bbox']['boxes_3d'][inds].tensor.numpy()
-        # for now we convert points into depth mode
-        if box_mode_3d == Box3DMode.DEPTH:
-            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
-        elif box_mode_3d == Box3DMode.CAM or box_mode_3d == Box3DMode.LIDAR:
-            points = points[..., [1, 0, 2]]
-            points[..., 0] *= -1
-            pred_bboxes = Box3DMode.convert(pred_bboxes, box_mode_3d,
-                                            Box3DMode.DEPTH)
-            pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
-        else:
-            ValueError(
-                f'Unsupported box_mode_3d {box_mode_3d} for convertion!')
-        show_result(points, None, pred_bboxes, out_dir, file_name)
+        for batch_id in range(len(result)):
+            if isinstance(data['points'][0], DC):
+                points = data['points'][0]._data[0][batch_id].numpy()
+            elif mmcv.is_list_of(data['points'][0], torch.Tensor):
+                points = data['points'][0][batch_id]
+            else:
+                ValueError(f"Unsupported data type {type(data['points'][0])} "
+                           f'for visualization!')
+            if isinstance(data['img_metas'][0], DC):
+                pts_filename = data['img_metas'][0]._data[0][batch_id][
+                    'pts_filename']
+                box_mode_3d = data['img_metas'][0]._data[0][batch_id][
+                    'box_mode_3d']
+            elif mmcv.is_list_of(data['img_metas'][0], dict):
+                pts_filename = data['img_metas'][0][batch_id]['pts_filename']
+                box_mode_3d = data['img_metas'][0][batch_id]['box_mode_3d']
+            else:
+                ValueError(f"Unsupported data type {type(data['img_metas'][0])} "
+                           f'for visualization!')
+            file_name = osp.split(pts_filename)[-1].split('.')[0]
+            assert out_dir is not None, 'Expect out_dir, got none.'
+            inds = result[batch_id]['pts_bbox']['scores_3d'] > 0.1
+            pred_bboxes = copy.deepcopy(
+                result[batch_id]['pts_bbox']['boxes_3d'][inds].tensor.numpy())
+            # for now we convert points into depth mode
+            if box_mode_3d == Box3DMode.DEPTH:
+                pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
+            elif (box_mode_3d == Box3DMode.CAM) or (box_mode_3d == Box3DMode.LIDAR):
+                points = points[..., [1, 0, 2]]
+                points[..., 0] *= -1
+                pred_bboxes = Box3DMode.convert(pred_bboxes, box_mode_3d,
+                                                Box3DMode.DEPTH)
+                pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
+            else:
+                ValueError(
+                    f'Unsupported box_mode_3d {box_mode_3d} for convertion!')
+            show_result(points, None, pred_bboxes, out_dir, file_name)
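Inside the loop, both detectors run the same post-processing before `show_result`: MVXTwoStageDetector additionally keeps only boxes with `scores_3d > 0.1`, then the boxes (and, for CAM or LiDAR input, the points) are converted to depth mode and the box centers are lifted by half the box height. A standalone sketch of that conversion under assumed inputs; `Box3DMode` is the real mmdet3d enum used in the diff, while the `to_depth_mode` helper and the random arrays are illustrative only, and unlike the diff this sketch actually raises the `ValueError` rather than merely constructing it:

import numpy as np
from mmdet3d.core.bbox import Box3DMode

def to_depth_mode(points, pred_bboxes, box_mode_3d):
    # Mirrors the per-batch conversion done in show_results (sketch).
    pred_bboxes = pred_bboxes.copy()
    if box_mode_3d == Box3DMode.DEPTH:
        # Already in depth mode; only lift centers to the box middle.
        pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
    elif box_mode_3d in (Box3DMode.CAM, Box3DMode.LIDAR):
        # Swap x/y and flip the new x axis, as the detectors do.
        points = points[..., [1, 0, 2]]
        points[..., 0] *= -1
        pred_bboxes = Box3DMode.convert(pred_bboxes, box_mode_3d,
                                        Box3DMode.DEPTH)
        pred_bboxes[..., 2] += pred_bboxes[..., 5] / 2
    else:
        raise ValueError(
            f'Unsupported box_mode_3d {box_mode_3d} for conversion!')
    return points, pred_bboxes

# Made-up inputs: 5 points (x, y, z) and 2 LiDAR boxes (x, y, z, dx, dy, dz, yaw).
scores = np.array([0.9, 0.05], dtype=np.float32)
boxes = np.random.rand(2, 7).astype(np.float32)
pts = np.random.rand(5, 3).astype(np.float32)
keep = scores > 0.1                         # score filter from mvx_two_stage.py
pts_depth, boxes_depth = to_depth_mode(pts, boxes[keep], Box3DMode.LIDAR)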