OpenDAS / mmdetection3d · Commits

Unverified commit cbc2491f, authored Oct 13, 2021 by Tai-Wang, committed via GitHub on Oct 13, 2021.
Add code-spell pre-commit hook and fix typos (#995)
Parent: 6b1602f1
Showing 20 of 79 changed files on this page, with 55 additions and 55 deletions (+55, -55).
Changed files on this page:

mmdet3d/datasets/pipelines/transforms_3d.py (+11, -11)
mmdet3d/datasets/waymo_dataset.py (+1, -1)
mmdet3d/models/backbones/nostem_regnet.py (+1, -1)
mmdet3d/models/dense_heads/anchor_free_mono3d_head.py (+1, -1)
mmdet3d/models/dense_heads/centerpoint_head.py (+4, -4)
mmdet3d/models/dense_heads/fcos_mono3d_head.py (+3, -3)
mmdet3d/models/dense_heads/groupfree3d_head.py (+2, -2)
mmdet3d/models/dense_heads/parta2_rpn_head.py (+1, -1)
mmdet3d/models/dense_heads/smoke_mono3d_head.py (+17, -17)
mmdet3d/models/dense_heads/train_mixins.py (+1, -1)
mmdet3d/models/dense_heads/vote_head.py (+1, -1)
mmdet3d/models/detectors/base.py (+1, -1)
mmdet3d/models/detectors/mvx_two_stage.py (+1, -1)
mmdet3d/models/model_utils/transformer.py (+1, -1)
mmdet3d/models/necks/dla_neck.py (+1, -1)
mmdet3d/models/roi_heads/bbox_heads/h3d_bbox_head.py (+2, -2)
mmdet3d/models/roi_heads/bbox_heads/parta2_bbox_head.py (+1, -1)
mmdet3d/models/roi_heads/mask_heads/primitive_head.py (+2, -2)
mmdet3d/models/voxel_encoders/pillar_encoder.py (+1, -1)
mmdet3d/models/voxel_encoders/voxel_encoder.py (+2, -2)
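The code-spell hook named in the commit title is registered in .pre-commit-config.yaml, which is among the changed files not shown on this page. For orientation, a typical codespell entry looks roughly like the sketch below; the revision and any arguments used by this commit are not visible here, so treat them as placeholders.

# Hedged sketch of a codespell pre-commit entry; rev is a placeholder,
# not taken from this commit.
repos:
  - repo: https://github.com/codespell-project/codespell
    rev: v2.1.0
    hooks:
      - id: codespell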
mmdet3d/datasets/pipelines/transforms_3d.py
@@ -151,7 +151,7 @@ class RandomFlip3D(RandomFlip):
            'pcd_horizontal_flip' and 'pcd_vertical_flip' keys are added
            into result dict.
        """
-       # filp 2D image and its annotations
+       # flip 2D image and its annotations
        super(RandomFlip3D, self).__call__(input_dict)
        if self.sync_2d:
@@ -921,11 +921,11 @@ class PointSample(object):
        """
        points = results['points']
        # Points in Camera coord can provide the depth information.
-       # TODO: Need to suport distance-based sampling for other coord system.
+       # TODO: Need to support distance-based sampling for other coord system.
        if self.sample_range is not None:
            from mmdet3d.core.points import CameraPoints
-           assert isinstance(points, CameraPoints), \
-               'Sampling based on distance is only appliable for CAMERA coord'
+           assert isinstance(points, CameraPoints), 'Sampling based on' \
+               'distance is only applicable for CAMERA coord'
        points, choices = self._points_random_sampling(
            points,
            self.num_points,
@@ -1293,7 +1293,7 @@ class VoxelBasedPointSampler(object):
    Args:
        cur_sweep_cfg (dict): Config for sampling current points.
        prev_sweep_cfg (dict): Config for sampling previous points.
-       time_dim (int): Index that indicate the time dimention
+       time_dim (int): Index that indicate the time dimension
            for input points.
    """
@@ -1317,7 +1317,7 @@ class VoxelBasedPointSampler(object):
            points (np.ndarray): Points subset to be sampled.
            sampler (VoxelGenerator): Voxel based sampler for
                each points subset.
-           point_dim (int): The dimention of each points
+           point_dim (int): The dimension of each points
        Returns:
            np.ndarray: Sampled points.
@@ -1398,7 +1398,7 @@ class VoxelBasedPointSampler(object):
            points_numpy = points_numpy.squeeze(1)
        results['points'] = points.new_point(points_numpy[..., :original_dim])
-       # Restore the correspoinding seg and mask fields
+       # Restore the corresponding seg and mask fields
        for key, dim_index in map_fields2dim:
            results[key] = points_numpy[..., dim_index]
@@ -1551,7 +1551,7 @@ class AffineResize(object):
            results[key] = bboxes

    def _affine_transform(self, points, matrix):
-       """Affine transform bbox points to input iamge.
+       """Affine transform bbox points to input image.
        Args:
            points (np.ndarray): Points to be transformed.
@@ -1605,10 +1605,10 @@ class AffineResize(object):
        return matrix.astype(np.float32)

    def _get_ref_point(self, ref_point1, ref_point2):
-       """Get reference point to calculate affine transfrom matrix.
+       """Get reference point to calculate affine transform matrix.
        While using opencv to calculate the affine matrix, we need at least
-       three corresponding points seperately on original image and target
+       three corresponding points separately on original image and target
        image. Here we use two points to get the the third reference point.
        """
        d = ref_point1 - ref_point2
@@ -1628,7 +1628,7 @@ class RandomShiftScale(object):
    Different from the normal shift and scale function, it doesn't
    directly shift or scale image. It can record the shift and scale
-   infos into loading pipelines. It's desgined to be used with
+   infos into loading pipelines. It's designed to be used with
    AffineResize together.
    Args:
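The two pipeline transforms touched above are normally assembled in a dataset pipeline config. Below is a hypothetical fragment using the arguments that appear in the hunks (sync_2d, num_points, sample_range); the flip-ratio key and all numeric values are illustrative assumptions, not taken from a released config.

# Hypothetical pipeline fragment; values are illustrative.
train_pipeline = [
    dict(
        type='RandomFlip3D',
        sync_2d=True,  # flip the 2D image together with the point cloud
        flip_ratio_bev_horizontal=0.5),
    dict(
        type='PointSample',
        num_points=16384,
        # Distance-based sampling via sample_range is only applicable to
        # points in the CAMERA coordinate system, per the assert above.
        sample_range=40.0),
]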
mmdet3d/datasets/waymo_dataset.py
@@ -234,7 +234,7 @@ class WaymoDataset(KittiDataset):
            pklfile_prefix (str, optional): The prefix of pkl files including
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
-           submission_prefix (str, optional): The prefix of submission datas.
+           submission_prefix (str, optional): The prefix of submission data.
                If not specified, the submission data will not be generated.
            show (bool, optional): Whether to visualize.
                Default: False.
mmdet3d/models/backbones/nostem_regnet.py
@@ -16,7 +16,7 @@ class NoStemRegNet(RegNet):
            - wm (float): Quantization parameter to quantize the width.
            - depth (int): Depth of the backbone.
            - group_w (int): Width of group.
-           - bot_mul (float): Bottleneck ratio, i.e. expansion of bottlneck.
+           - bot_mul (float): Bottleneck ratio, i.e. expansion of bottleneck.
        strides (Sequence[int]): Strides of the first block of each stage.
        base_channels (int): Base channels after stem layer.
        in_channels (int): Number of input image channels. Normally 3.
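The docstring keys fixed above (wm, depth, group_w, bot_mul) belong to the arch dict handed to the backbone. A hedged configuration sketch follows; the numbers follow RegNetX-400MF-style values and are illustrative, not copied from a released mmdetection3d config.

# Illustrative backbone settings only.
backbone = dict(
    type='NoStemRegNet',
    arch=dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),
    strides=(1, 2, 2, 2),
    base_channels=64)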
mmdet3d/models/dense_heads/anchor_free_mono3d_head.py
@@ -321,7 +321,7 @@ class AnchorFreeMono3DHead(BaseMono3DDenseHead):
        return multi_apply(self.forward_single, feats)[:5]

    def forward_single(self, x):
-       """Forward features of a single scale levle.
+       """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.
mmdet3d/models/dense_heads/centerpoint_head.py
@@ -23,8 +23,8 @@ class SeparateHead(BaseModule):
        heads (dict): Conv information.
        head_conv (int, optional): Output channels.
            Default: 64.
-       final_kernal (int, optional): Kernal size for the last conv layer.
-           Deafult: 1.
+       final_kernal (int, optional): Kernel size for the last conv layer.
+           Default: 1.
        init_bias (float, optional): Initial bias. Default: -2.19.
        conv_cfg (dict, optional): Config of conv layer.
            Default: dict(type='Conv2d')
@@ -136,8 +136,8 @@ class DCNSeparateHead(BaseModule):
        dcn_config (dict): Config of dcn layer.
        head_conv (int, optional): Output channels.
            Default: 64.
-       final_kernal (int, optional): Kernal size for the last conv
-           layer. Deafult: 1.
+       final_kernal (int, optional): Kernel size for the last conv
+           layer. Default: 1.
        init_bias (float, optional): Initial bias. Default: -2.19.
        conv_cfg (dict, optional): Config of conv layer.
            Default: dict(type='Conv2d')
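Both head classes above document the same final kernel and init_bias arguments. A hedged configuration sketch follows; the (out_channels, num_convs) pairs are illustrative, and the keyword spelling final_kernel (rather than the docstring's final_kernal) is an assumption based on common CenterPoint-style configs.

# Hypothetical separate-head configuration; values are illustrative.
common_heads = dict(reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2))
separate_head = dict(
    type='SeparateHead',
    init_bias=-2.19,  # documented default initial bias
    final_kernel=3)   # kernel size of the last conv layer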
mmdet3d/models/dense_heads/fcos_mono3d_head.py
@@ -151,7 +151,7 @@ class FCOSMono3DHead(AnchorFreeMono3DHead):
                           self.strides)[:5]

    def forward_single(self, x, scale, stride):
-       """Forward features of a single scale levle.
+       """Forward features of a single scale level.

        Args:
            x (Tensor): FPN feature maps of the specified stride.
@@ -691,7 +691,7 @@ class FCOSMono3DHead(AnchorFreeMono3DHead):
        Args:
            points (torch.Tensor): points in 2D images, [N, 3],
                3 corresponds with x, y in the image and depth.
-           view (np.ndarray): camera instrinsic, [3, 3]
+           view (np.ndarray): camera intrinsic, [3, 3]

        Returns:
            torch.Tensor: points in 3D space. [N, 3],
@@ -713,7 +713,7 @@ class FCOSMono3DHead(AnchorFreeMono3DHead):
        viewpad[:view.shape[0], :view.shape[1]] = points2D.new_tensor(view)
        inv_viewpad = torch.inverse(viewpad).transpose(0, 1)

-       # Do operation in homogenous coordinates.
+       # Do operation in homogeneous coordinates.
        nbr_points = unnorm_points2D.shape[0]
        homo_points2D = torch.cat(
            [unnorm_points2D,
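The intrinsic and homogeneous-coordinate fixes above belong to a routine that lifts image points with known depth back into 3D camera space. A small self-contained sketch of that math; the intrinsic matrix and the pixel/depth values are illustrative.

import numpy as np

# Back-project a pixel (u, v) with depth d through the inverse intrinsic K.
K = np.array([[721.5, 0.0, 609.6],
              [0.0, 721.5, 172.9],
              [0.0, 0.0, 1.0]])
u, v, d = 640.0, 180.0, 12.0
xyz = d * np.linalg.inv(K) @ np.array([u, v, 1.0])  # point in camera coordinates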
mmdet3d/models/dense_heads/groupfree3d_head.py
@@ -299,7 +299,7 @@ class GroupFree3DHead(BaseModule):
        """Forward pass.

        Note:
-           The forward of GroupFree3DHead is devided into 2 steps:
+           The forward of GroupFree3DHead is divided into 2 steps:

                1. Initial object candidates sampling.
                2. Iterative object box prediction by transformer decoder.
@@ -880,7 +880,7 @@ class GroupFree3DHead(BaseModule):
        Returns:
            list[tuple[torch.Tensor]]: Bounding boxes, scores and labels.
        """
-       # support multi-stage predicitons
+       # support multi-stage predictions
        assert self.test_cfg['prediction_stages'] in \
            ['last', 'all', 'last_three']
mmdet3d/models/dense_heads/parta2_rpn_head.py
@@ -207,7 +207,7 @@ class PartA2RPNHead(Anchor3DHead):
        mlvl_dir_scores = torch.cat(mlvl_dir_scores)
        # shape [k, num_class] before sigmoid
        # PartA2 need to keep raw classification score
-       # becase the bbox head in the second stage does not have
+       # because the bbox head in the second stage does not have
        # classification branch,
        # roi head need this score as classification score
        mlvl_cls_score = torch.cat(mlvl_cls_score)
mmdet3d/models/dense_heads/smoke_mono3d_head.py
@@ -25,9 +25,9 @@ class SMOKEMono3DHead(AnchorFreeMono3DHead):
        num_classes (int): Number of categories excluding the background
            category.
        in_channels (int): Number of channels in the input feature map.
-       dim_channel (list[int]): indexs of dimension offset preds in
+       dim_channel (list[int]): indices of dimension offset preds in
            regression heatmap channels.
-       ori_channel (list[int]): indexs of orientation offset pred in
+       ori_channel (list[int]): indices of orientation offset pred in
            regression heatmap channels.
        bbox_coder (:obj:`CameraInstance3DBoxes`): Bbox coder
            for encoding and decoding boxes.
@@ -221,12 +221,12 @@ class SMOKEMono3DHead(AnchorFreeMono3DHead):
        return batch_bboxes, batch_scores, batch_topk_labels

    def get_predictions(self, labels3d, centers2d, gt_locations, gt_dimensions,
-                       gt_orientations, indexs, img_metas, pred_reg):
+                       gt_orientations, indices, img_metas, pred_reg):
        """Prepare predictions for computing loss.

        Args:
            labels3d (Tensor): Labels of each 3D box.
-               shpae (B, max_objs, )
+               shape (B, max_objs, )
            centers2d (Tensor): Coords of each projected 3D box
                center on image. shape (B * max_objs, 2)
            gt_locations (Tensor): Coords of each 3D box's location.
@@ -235,7 +235,7 @@ class SMOKEMono3DHead(AnchorFreeMono3DHead):
                shape (N, 3)
            gt_orientations (Tensor): Orientation(yaw) of each 3D box.
                shape (N, 1)
-           indexs (Tensor): Indexs of the existence of the 3D box.
+           indices (Tensor): Indices of the existence of the 3D box.
                shape (B * max_objs, )
            img_metas (list[dict]): Meta information of each image,
                e.g., image size, scaling factor, etc.
@@ -247,7 +247,7 @@ class SMOKEMono3DHead(AnchorFreeMono3DHead):
                - bbox3d_yaws (:obj:`CameraInstance3DBoxes`):
                    bbox calculated using pred orientations.
                - bbox3d_dims (:obj:`CameraInstance3DBoxes`):
-                   bbox calculated using pred dimentions.
+                   bbox calculated using pred dimensions.
                - bbox3d_locs (:obj:`CameraInstance3DBoxes`):
                    bbox calculated using pred locations.
        """
@@ -269,12 +269,12 @@ class SMOKEMono3DHead(AnchorFreeMono3DHead):
            pred_regression_pois, centers2d, labels3d, cam2imgs, trans_mats,
            gt_locations)
-       locations, dimensions, orientations = locations[indexs], dimensions[
-           indexs], orientations[indexs]
+       locations, dimensions, orientations = locations[indices], dimensions[
+           indices], orientations[indices]
        locations[:, 1] += dimensions[:, 1] / 2
-       gt_locations = gt_locations[indexs]
+       gt_locations = gt_locations[indices]
        assert len(locations) == len(gt_locations)
        assert len(dimensions) == len(gt_dimensions)
@@ -293,7 +293,7 @@ class SMOKEMono3DHead(AnchorFreeMono3DHead):
    def get_targets(self, gt_bboxes, gt_labels, gt_bboxes_3d, gt_labels_3d,
                    centers2d, feat_shape, img_shape, img_metas):
        """Get training targets for batch images.
``
        Args:
            gt_bboxes (list[Tensor]): Ground truth bboxes of each image,
                shape (num_gt, 4).
@@ -318,10 +318,10 @@ class SMOKEMono3DHead(AnchorFreeMono3DHead):
            - gt_centers2d (Tensor): Coords of each projected 3D box
                center on image. shape (B * max_objs, 2)
            - gt_labels3d (Tensor): Labels of each 3D box.
-               shpae (B, max_objs, )
+               shape (B, max_objs, )
-           - indexs (Tensor): Indexs of the existence of the 3D box.
+           - indices (Tensor): Indices of the existence of the 3D box.
                shape (B * max_objs, )
-           - affine_indexs (Tensor): Indexs of the affine of the 3D box.
+           - affine_indices (Tensor): Indices of the affine of the 3D box.
                shape (N, )
            - gt_locs (Tensor): Coords of each 3D box's location.
                shape (N, 3)
@@ -417,8 +417,8 @@ class SMOKEMono3DHead(AnchorFreeMono3DHead):
        target_labels = dict(
            gt_centers2d=batch_centers2d.long(),
            gt_labels3d=batch_labels_3d,
-           indexs=inds,
+           indices=inds,
-           reg_indexs=reg_inds,
+           reg_indices=reg_inds,
            gt_locs=batch_gt_locations,
            gt_dims=gt_dimensions,
            gt_yaws=gt_orientations,
@@ -487,14 +487,14 @@ class SMOKEMono3DHead(AnchorFreeMono3DHead):
            gt_locations=target_labels['gt_locs'],
            gt_dimensions=target_labels['gt_dims'],
            gt_orientations=target_labels['gt_yaws'],
-           indexs=target_labels['indexs'],
+           indices=target_labels['indices'],
            img_metas=img_metas,
            pred_reg=pred_reg)

        loss_cls = self.loss_cls(
            center2d_heatmap, center2d_heatmap_target, avg_factor=avg_factor)

-       reg_inds = target_labels['reg_indexs']
+       reg_inds = target_labels['reg_indices']
        loss_bbox_oris = self.loss_bbox(
            pred_bboxes['ori'].corners[reg_inds, ...],
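Most of the edits above rename indexs to indices; per the docstrings, this tensor of shape (B * max_objs, ) marks which ground-truth slots actually contain a box, and predictions and targets are gathered with it before the loss is computed. A minimal, illustrative version of that gathering step (shapes are made up for the example):

import torch

# Illustrative only: keep the slots flagged by the `indices` mask.
max_objs = 4
locations = torch.randn(2 * max_objs, 3)                    # (B * max_objs, 3)
indices = torch.tensor([1, 1, 0, 0, 1, 0, 0, 0], dtype=torch.bool)
valid_locations = locations[indices]                        # (num_valid, 3)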
mmdet3d/models/dense_heads/train_mixins.py
@@ -35,7 +35,7 @@ class AnchorTrainMixin(object):
            tuple (list, list, list, list, list, list, int, int):
                Anchor targets, including labels, label weights,
                bbox targets, bbox weights, direction targets,
-               direction weights, number of postive anchors and
+               direction weights, number of positive anchors and
                number of negative anchors.
        """
        num_imgs = len(input_metas)
mmdet3d/models/dense_heads/vote_head.py
@@ -136,7 +136,7 @@ class VoteHead(BaseModule):
        """Forward pass.

        Note:
-           The forward of VoteHead is devided into 4 steps:
+           The forward of VoteHead is divided into 4 steps:

                1. Generate vote_points from seed_points.
                2. Aggregate vote_points.
mmdet3d/models/detectors/base.py
@@ -103,6 +103,6 @@ class Base3DDetector(BaseDetector):
                                          Box3DMode.DEPTH)
            elif box_mode_3d != Box3DMode.DEPTH:
                ValueError(
-                   f'Unsupported box_mode_3d {box_mode_3d} for convertion!')
+                   f'Unsupported box_mode_3d {box_mode_3d} for conversion!')
            pred_bboxes = pred_bboxes.tensor.cpu().numpy()
            show_result(points, None, pred_bboxes, out_dir, file_name)
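The hunk above (and the identical one in mvx_two_stage.py below) converts predicted boxes to depth mode before visualization and complains about unsupported modes, which is where the fixed "conversion" message lives. A hedged sketch of that conversion step, assuming the Box3DMode and LiDARInstance3DBoxes classes exposed by mmdet3d.core.bbox; the box values are illustrative.

import torch
from mmdet3d.core.bbox import Box3DMode, LiDARInstance3DBoxes

# One dummy LiDAR-frame box (x, y, z, dx, dy, dz, yaw), converted to DEPTH
# mode for show_result-style visualization.
boxes = LiDARInstance3DBoxes(torch.tensor([[0.0, 0.0, -1.0, 4.0, 1.8, 1.6, 0.0]]))
depth_boxes = Box3DMode.convert(boxes, Box3DMode.LIDAR, Box3DMode.DEPTH)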
mmdet3d/models/detectors/mvx_two_stage.py
@@ -497,7 +497,7 @@ class MVXTwoStageDetector(Base3DDetector):
                                          Box3DMode.DEPTH)
            elif box_mode_3d != Box3DMode.DEPTH:
                ValueError(
-                   f'Unsupported box_mode_3d {box_mode_3d} for convertion!')
+                   f'Unsupported box_mode_3d {box_mode_3d} for conversion!')
            pred_bboxes = pred_bboxes.tensor.cpu().numpy()
            show_result(points, None, pred_bboxes, out_dir, file_name)
mmdet3d/models/model_utils/transformer.py
@@ -132,7 +132,7 @@ class ConvBNPositionalEncoding(nn.Module):
            xyz (Tensor): (B, N, 3) the coordinates to embed.

        Returns:
-           Tensor: (B, num_pos_feats, N) the embeded position features.
+           Tensor: (B, num_pos_feats, N) the embedded position features.
        """
        xyz = xyz.permute(0, 2, 1)
        position_embedding = self.position_embedding_head(xyz)
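A usage sketch for the positional-encoding module documented above. The constructor argument names and the feature width 288 are assumptions (they are not shown in this hunk), so treat them as placeholders.

import torch
from mmdet3d.models.model_utils.transformer import ConvBNPositionalEncoding

# Assumed constructor arguments; embeds (B, N, 3) coords into (B, C, N).
pos_embed = ConvBNPositionalEncoding(input_channel=3, num_pos_feats=288)
xyz = torch.rand(2, 1024, 3)   # (B, N, 3) coordinates to embed
feats = pos_embed(xyz)         # (B, 288, 1024) embedded position features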
mmdet3d/models/necks/dla_neck.py
@@ -172,7 +172,7 @@ class DLANeck(BaseModule):
    Args:
        in_channels (list[int], optional): List of input channels
            of multi-scale feature map.
-       start_level (int, optioanl): The scale level where upsampling
+       start_level (int, optional): The scale level where upsampling
            starts. Default: 2.
        end_level (int, optional): The scale level where upsampling
            ends. Default: 5.
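A hedged neck configuration using the documented defaults start_level=2 and end_level=5; the channel list follows a typical DLA-34 layout and is illustrative rather than copied from a released config.

# Illustrative neck config.
neck = dict(
    type='DLANeck',
    in_channels=[16, 32, 64, 128, 256, 512],
    start_level=2,  # upsampling starts at this scale level (default: 2)
    end_level=5)    # upsampling ends at this scale level (default: 5)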
mmdet3d/models/roi_heads/bbox_heads/h3d_bbox_head.py
@@ -20,7 +20,7 @@ class H3DBboxHead(BaseModule):
    Args:
        num_classes (int): The number of classes.
-       suface_matching_cfg (dict): Config for suface primitive matching.
+       surface_matching_cfg (dict): Config for surface primitive matching.
        line_matching_cfg (dict): Config for line primitive matching.
        bbox_coder (:obj:`BaseBBoxCoder`): Bbox coder for encoding and
            decoding boxes.
@@ -36,7 +36,7 @@ class H3DBboxHead(BaseModule):
        primitive_refine_channels (tuple[int]): Convolution channels of
            prediction layer.
        upper_thresh (float): Threshold for line matching.
-       surface_thresh (float): Threshold for suface matching.
+       surface_thresh (float): Threshold for surface matching.
        line_thresh (float): Threshold for line matching.
        conv_cfg (dict): Config of convolution in prediction layer.
        norm_cfg (dict): Config of BN in prediction layer.
mmdet3d/models/roi_heads/bbox_heads/parta2_bbox_head.py
@@ -574,7 +574,7 @@ class PartA2BboxHead(BaseModule):
            box_preds (torch.Tensor): Predicted boxes in shape (N, 7+C).
            score_thr (float): Threshold of scores.
            nms_thr (float): Threshold for NMS.
-           input_meta (dict): Meta informations of the current sample.
+           input_meta (dict): Meta information of the current sample.
            use_rotate_nms (bool, optional): Whether to use rotated nms.
                Defaults to True.
mmdet3d/models/roi_heads/mask_heads/primitive_head.py
@@ -20,7 +20,7 @@ class PrimitiveHead(BaseModule):
        num_dims (int): The dimension of primitive semantic information.
        num_classes (int): The number of class.
        primitive_mode (str): The mode of primitive module,
-           avaliable mode ['z', 'xy', 'line'].
+           available mode ['z', 'xy', 'line'].
        bbox_coder (:obj:`BaseBBoxCoder`): Bbox coder for encoding and
            decoding boxes.
        train_cfg (dict): Config for training.
@@ -30,7 +30,7 @@ class PrimitiveHead(BaseModule):
        feat_channels (tuple[int]): Convolution channels of
            prediction layer.
        upper_thresh (float): Threshold for line matching.
-       surface_thresh (float): Threshold for suface matching.
+       surface_thresh (float): Threshold for surface matching.
        conv_cfg (dict): Config of convolution in prediction layer.
        norm_cfg (dict): Config of BN in prediction layer.
        objectness_loss (dict): Config of objectness loss.
mmdet3d/models/voxel_encoders/pillar_encoder.py
@@ -233,7 +233,7 @@ class DynamicPillarFeatureNet(PillarFeatureNet):
        Returns:
            torch.Tensor: Corresponding voxel centers of each points, shape
-               (M, C), where M is the numver of points.
+               (M, C), where M is the number of points.
        """
        # Step 1: scatter voxel into canvas
        # Calculate necessary things for canvas creation
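The docstring above describes mapping each point to the center of the voxel (pillar) it falls in. A generic, self-contained illustration of that computation; the voxel size and range are illustrative and this is not the module's exact implementation.

import torch

# Voxel coordinates (batch_idx, z, y, x) -> metric voxel centers.
voxel_size = torch.tensor([0.16, 0.16, 4.0])       # (dx, dy, dz)
pc_range_min = torch.tensor([0.0, -39.68, -3.0])   # (x_min, y_min, z_min)
coors = torch.tensor([[0, 0, 120, 250]])           # one voxel: (batch, z, y, x)
centers = (coors[:, [3, 2, 1]].float() + 0.5) * voxel_size + pc_range_min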
mmdet3d/models/voxel_encoders/voxel_encoder.py
@@ -232,7 +232,7 @@ class DynamicVFE(nn.Module):
            coors (torch.Tensor): Coordinates of voxels, shape is Nx(1+NDim).
            points (list[torch.Tensor], optional): Raw points used to guide the
                multi-modality fusion. Defaults to None.
-           img_feats (list[torch.Tensor], optional): Image fetures used for
+           img_feats (list[torch.Tensor], optional): Image features used for
                multi-modality fusion. Defaults to None.
            img_metas (dict, optional): [description]. Defaults to None.
@@ -397,7 +397,7 @@ class HardVFE(nn.Module):
            features (torch.Tensor): Features of voxels, shape is MxNxC.
            num_points (torch.Tensor): Number of points in each voxel.
            coors (torch.Tensor): Coordinates of voxels, shape is Mx(1+NDim).
-           img_feats (list[torch.Tensor], optional): Image fetures used for
+           img_feats (list[torch.Tensor], optional): Image features used for
                multi-modality fusion. Defaults to None.
            img_metas (dict, optional): [description]. Defaults to None.