OpenDAS / mmdetection3d
"x/model/textprocessor.go" did not exist on "333e360422744e92275af2c1c2d5bc039ad97e8f"
Commit aec41c7f, authored Apr 27, 2020 by zhangwenwei
Merge master
Parents: 49f06039, 4eca6606

62 changed files in this commit; this page shows 20 changed files with 246 additions and 346 deletions (+246, -346).
Files on this page:

mmdet3d/datasets/__init__.py                      +7    -3
mmdet3d/datasets/builder.py                       +2    -1
mmdet3d/datasets/kitti_dataset.py                 +1    -1
mmdet3d/datasets/nuscenes2d_dataset.py            +0    -38
mmdet3d/datasets/nuscenes_dataset.py              +1    -1
mmdet3d/datasets/pipelines/__init__.py            +8    -1
mmdet3d/datasets/pipelines/dbsampler.py           +1    -1
mmdet3d/datasets/pipelines/formating.py           +1    -1
mmdet3d/datasets/pipelines/loading.py             +1    -1
mmdet3d/datasets/pipelines/train_aug.py           +2    -2
mmdet3d/datasets/registry.py                      +1    -1
mmdet3d/models/anchor_heads/boxvelo_head.py       +15   -27
mmdet3d/models/anchor_heads/second_head.py        +48   -64
mmdet3d/models/anchor_heads/train_mixins.py       +8    -21
mmdet3d/models/builder.py                         +2    -3
mmdet3d/models/fusion_layers/point_fusion.py      +2    -3
mmdet3d/models/registry.py                        +1    -1
mmdet3d/models/utils/__init__.py                  +0    -3
mmdet3d/models/utils/weight_init.py               +0    -46
mmdet3d/ops/iou3d/src/iou3d.cpp                   +145  -127
mmdet3d/datasets/__init__.py

-from mmdet.datasets.registry import DATASETS
+from mmdet.datasets.builder import DATASETS
 from .builder import build_dataset
 from .dataset_wrappers import RepeatFactorDataset
 from .kitti2d_dataset import Kitti2DDataset
 from .kitti_dataset import KittiDataset
 from .loader import DistributedGroupSampler, GroupSampler, build_dataloader
-from .nuscenes2d_dataset import NuScenes2DDataset
 from .nuscenes_dataset import NuScenesDataset
+from .pipelines import (GlobalRotScale, ObjectNoise, ObjectRangeFilter,
+                        ObjectSample, PointShuffle, PointsRangeFilter,
+                        RandomFlip3D)

 __all__ = [
     'KittiDataset', 'GroupSampler', 'DistributedGroupSampler',
     'build_dataloader', 'RepeatFactorDataset', 'DATASETS', 'build_dataset',
-    'CocoDataset', 'Kitti2DDataset', 'NuScenesDataset', 'NuScenes2DDataset'
+    'CocoDataset', 'Kitti2DDataset', 'NuScenesDataset',
+    'ObjectSample', 'RandomFlip3D', 'ObjectNoise', 'GlobalRotScale',
+    'PointShuffle', 'ObjectRangeFilter', 'PointsRangeFilter', 'Collect3D'
 ]
mmdet3d/datasets/builder.py

 import copy

+from mmcv.utils import build_from_cfg
 from mmdet.datasets import DATASETS, ConcatDataset, RepeatDataset
-from mmdet.utils import build_from_cfg

 from .dataset_wrappers import RepeatFactorDataset
 ...
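builder.py turns dataset config dicts into dataset objects, and the imports above pull in the registry plus the wrapper datasets it knows how to unwrap. A hedged sketch of that dispatch pattern follows; the function name and body are illustrative, not the exact code in this file.

# Sketch of the usual mmdetection-family build_dataset dispatch; illustrative only.
from mmcv.utils import build_from_cfg
from mmdet.datasets import DATASETS, ConcatDataset, RepeatDataset


def build_dataset_sketch(cfg, default_args=None):
    """Build a dataset, unwrapping the common wrapper configs first."""
    if isinstance(cfg, (list, tuple)):
        # A list of configs becomes a concatenation of datasets.
        return ConcatDataset(
            [build_dataset_sketch(c, default_args) for c in cfg])
    if cfg['type'] == 'RepeatDataset':
        # RepeatDataset wraps another dataset and repeats it `times` times.
        return RepeatDataset(
            build_dataset_sketch(cfg['dataset'], default_args), cfg['times'])
    # Everything else is looked up in the DATASETS registry.
    return build_from_cfg(cfg, DATASETS, default_args)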
mmdet3d/datasets/kitti_dataset.py

@@ -9,8 +9,8 @@ import torch
 import torch.utils.data as torch_data

 from mmdet.datasets import DATASETS
+from mmdet.datasets.pipelines import Compose
 from ..core.bbox import box_np_ops
-from .pipelines import Compose
 from .utils import remove_dontcare
 ...
mmdet3d/datasets/nuscenes2d_dataset.py  (deleted; previous contents below)

from pycocotools.coco import COCO

from mmdet3d.core.evaluation.coco_utils import getImgIds
from mmdet.datasets import DATASETS, CocoDataset


@DATASETS.register_module
class NuScenes2DDataset(CocoDataset):

    CLASSES = ('car', 'truck', 'trailer', 'bus', 'construction_vehicle',
               'bicycle', 'motorcycle', 'pedestrian', 'traffic_cone',
               'barrier')

    def load_annotations(self, ann_file):
        if not self.class_names:
            self.class_names = self.CLASSES
        self.coco = COCO(ann_file)
        # send class_names into the get id
        # in case we only need to train on several classes
        # by default self.class_names = CLASSES
        self.cat_ids = self.coco.getCatIds(catNms=self.class_names)
        self.cat2label = {
            cat_id: i  # + 1 rm +1 here thus the 0-79 are fg, 80 is bg
            for i, cat_id in enumerate(self.cat_ids)
        }
        # send cat ids to the get img id
        # in case we only need to train on several classes
        if len(self.cat_ids) < len(self.CLASSES):
            self.img_ids = getImgIds(self.coco, catIds=self.cat_ids)
        else:
            self.img_ids = self.coco.getImgIds()
        img_infos = []
        for i in self.img_ids:
            info = self.coco.loadImgs([i])[0]
            info['filename'] = info['file_name']
            img_infos.append(info)
        return img_infos
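The deleted loader leaned on pycocotools for the id bookkeeping. A minimal sketch of those calls, with a placeholder annotation path that is not a file shipped with the repo:

# 'nus_infos.json' is a placeholder; substitute a real COCO-format annotation file.
from pycocotools.coco import COCO

coco = COCO('nus_infos.json')
# Map the class names we train on to COCO category ids.
cat_ids = coco.getCatIds(catNms=['car', 'pedestrian'])
# With several catIds, coco.getImgIds returns only images containing *all* of
# them, which is presumably why the loader above routes through a custom
# getImgIds helper when training on a subset of classes.
img_ids = coco.getImgIds(catIds=cat_ids)
# Each entry holds the image metadata, including 'file_name'.
img_infos = [coco.loadImgs([i])[0] for i in img_ids]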
mmdet3d/datasets/nuscenes_dataset.py

@@ -9,8 +9,8 @@ import torch.utils.data as torch_data
 from nuscenes.utils.data_classes import Box as NuScenesBox

 from mmdet.datasets import DATASETS
+from mmdet.datasets.pipelines import Compose
 from ..core.bbox import box_np_ops
-from .pipelines import Compose


 @DATASETS.register_module
 ...
mmdet3d/datasets/pipelines/__init__.py

+from mmdet.datasets.pipelines import Compose
+from .dbsampler import DataBaseSampler, MMDataBaseSampler
+from .formating import DefaultFormatBundle, DefaultFormatBundle3D
+from .loading import LoadMultiViewImageFromFiles, LoadPointsFromFile
 from .train_aug import (GlobalRotScale, ObjectNoise, ObjectRangeFilter,
                         ObjectSample, PointShuffle, PointsRangeFilter,
                         RandomFlip3D)

 __all__ = [
     'ObjectSample', 'RandomFlip3D', 'ObjectNoise', 'GlobalRotScale',
-    'PointShuffle', 'ObjectRangeFilter', 'PointsRangeFilter', 'Collect3D'
+    'PointShuffle', 'ObjectRangeFilter', 'PointsRangeFilter', 'Collect3D',
+    'Compose', 'LoadMultiViewImageFromFiles', 'LoadPointsFromFile',
+    'DefaultFormatBundle', 'DefaultFormatBundle3D', 'DataBaseSampler',
+    'MMDataBaseSampler'
 ]
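These exports are consumed through Compose, which builds each config dict from the PIPELINES registry. A minimal sketch; PointShuffle is chosen only because it needs no constructor arguments, while real pipelines chain loading, augmentation and formatting steps.

import numpy as np
from mmdet3d.datasets.pipelines import Compose

# Each transform is given as a config dict and looked up in the PIPELINES
# registry when Compose is constructed.
pipeline = Compose([dict(type='PointShuffle')])
results = pipeline(dict(points=np.random.rand(100, 4).astype(np.float32)))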
mmdet3d/datasets/pipelines/dbsampler.py

@@ -68,7 +68,7 @@ class DataBaseSampler(object):
             db_infos = pickle.load(f)

         # filter database infos
-        from mmdet3d.apis import get_root_logger
+        from mmdet.apis import get_root_logger
         logger = get_root_logger()
         for k, v in db_infos.items():
             logger.info(f'load {len(v)} {k} database infos')
 ...
mmdet3d/datasets/pipelines/formating.py

 import numpy as np
 from mmcv.parallel import DataContainer as DC

+from mmdet.datasets.builder import PIPELINES
 from mmdet.datasets.pipelines import to_tensor
-from mmdet.datasets.registry import PIPELINES

 PIPELINES._module_dict.pop('DefaultFormatBundle')
 ...
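The _module_dict.pop call above exists because an mmcv registry will not accept two modules under the same name; popping the upstream entry frees the name for the local class. A sketch of that pattern, with a placeholder class body standing in for the real formatting transform:

from mmdet.datasets.builder import PIPELINES

# Drop the upstream registration so the local class can reuse the name.
if 'DefaultFormatBundle' in PIPELINES._module_dict:
    PIPELINES._module_dict.pop('DefaultFormatBundle')


@PIPELINES.register_module
class DefaultFormatBundle(object):
    """Placeholder body; the real transform wraps results in DataContainers."""

    def __call__(self, results):
        return results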
mmdet3d/datasets/pipelines/loading.py

@@ -3,7 +3,7 @@ import os.path as osp
 import mmcv
 import numpy as np

-from mmdet.datasets.registry import PIPELINES
+from mmdet.datasets.builder import PIPELINES


 @PIPELINES.register_module
 ...
mmdet3d/datasets/pipelines/train_aug.py

 import numpy as np

+from mmcv.utils import build_from_cfg
 from mmdet3d.core.bbox import box_np_ops
-from mmdet3d.utils import build_from_cfg
+from mmdet.datasets.builder import PIPELINES
 from mmdet.datasets.pipelines import RandomFlip
-from mmdet.datasets.registry import PIPELINES

 from ..registry import OBJECTSAMPLERS
 from .data_augment_utils import noise_per_object_v3_
 ...
mmdet3d/datasets/registry.py

-from mmdet.utils import Registry
+from mmcv.utils import Registry

 OBJECTSAMPLERS = Registry('Object sampler')
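For context on the import switch: the registry behaves the same whether it comes from mmdet.utils or mmcv.utils. A minimal sketch of how OBJECTSAMPLERS is used, with a hypothetical sampler class standing in for the real DataBaseSampler:

from mmcv.utils import Registry, build_from_cfg

OBJECTSAMPLERS = Registry('Object sampler')


@OBJECTSAMPLERS.register_module
class ToyObjectSampler(object):
    """Hypothetical sampler used only to illustrate registration."""

    def __init__(self, rate=1.0):
        self.rate = rate


# Configs refer to registered classes by their 'type' name; build_from_cfg
# pops 'type', looks it up in the registry and instantiates it.
sampler = build_from_cfg(dict(type='ToyObjectSampler', rate=0.5),
                         OBJECTSAMPLERS)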
mmdet3d/models/anchor_heads/boxvelo_head.py

 import numpy as np
 import torch
-from mmcv.cnn import normal_init
+from mmcv.cnn import bias_init_with_prob, normal_init

 from mmdet3d.core import box_torch_ops, boxes3d_to_bev_torch_lidar
 from mmdet3d.ops.iou3d.iou3d_utils import nms_gpu, nms_normal_gpu
 from mmdet.models import HEADS
-from ..utils import bias_init_with_prob
 from .second_head import SECONDHead
 ...
@@ -15,12 +14,6 @@ class Anchor3DVeloHead(SECONDHead):
     Args:
         in_channels (int): Number of channels in the input feature map.
         feat_channels (int): Number of channels of the feature map.
-        anchor_scales (Iterable): Anchor scales.
-        anchor_ratios (Iterable): Anchor aspect ratios.
-        anchor_strides (Iterable): Anchor strides.
-        anchor_base_sizes (Iterable): Anchor base sizes.
-        target_means (Iterable): Mean values of regression targets.
-        target_stds (Iterable): Std values of regression targets.
         loss_cls (dict): Config of classification loss.
         loss_bbox (dict): Config of localization loss.
     """  # noqa: W605
 ...
@@ -31,25 +24,25 @@ class Anchor3DVeloHead(SECONDHead):
                  in_channels,
                  train_cfg,
                  test_cfg,
-                 cache_anchor=False,
                  feat_channels=256,
                  use_direction_classifier=True,
                  encode_bg_as_zeros=False,
                  box_code_size=9,
-                 anchor_generator=dict(type='AnchorGeneratorRange', ),
-                 anchor_range=[0, -39.68, -1.78, 69.12, 39.68, -1.78],
-                 anchor_strides=[2],
-                 anchor_sizes=[[1.6, 3.9, 1.56]],
-                 anchor_rotations=[0, 1.57],
-                 anchor_custom_values=[0, 0],
+                 anchor_generator=dict(
+                     type='Anchor3DRangeGenerator',
+                     range=[0, -39.68, -1.78, 69.12, 39.68, -1.78],
+                     strides=[2],
+                     sizes=[[1.6, 3.9, 1.56]],
+                     rotations=[0, 1.57],
+                     custom_values=[0, 0],
+                     reshape_out=True,
+                 ),
                  assigner_per_size=False,
                  assign_per_class=False,
                  diff_rad_by_sin=True,
                  dir_offset=0,
                  dir_limit_offset=1,
-                 target_means=(.0, .0, .0, .0),
-                 target_stds=(1.0, 1.0, 1.0, 1.0),
-                 bbox_coder=dict(type='Residual3DBoxCoder', ),
+                 bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
                  loss_cls=dict(
                      type='CrossEntropyLoss',
                      use_sigmoid=True,
 ...
@@ -58,14 +51,11 @@ class Anchor3DVeloHead(SECONDHead):
                      type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=2.0),
                  loss_dir=dict(type='CrossEntropyLoss', loss_weight=0.2)):
         super().__init__(class_names, in_channels, train_cfg, test_cfg,
-                         cache_anchor, feat_channels, use_direction_classifier,
+                         feat_channels, use_direction_classifier,
                          encode_bg_as_zeros, box_code_size, anchor_generator,
-                         anchor_range, anchor_strides, anchor_sizes,
-                         anchor_rotations, anchor_custom_values,
                          assigner_per_size, assign_per_class, diff_rad_by_sin,
-                         dir_offset, dir_limit_offset, target_means,
-                         target_stds, bbox_coder, loss_cls,
+                         dir_offset, dir_limit_offset,
+                         bbox_coder, loss_cls,
                          loss_bbox,
                          loss_dir)
         self.num_classes = num_classes
         # build head layers & losses
         if not self.use_sigmoid_cls:
 ...
@@ -131,9 +121,7 @@ class Anchor3DVeloHead(SECONDHead):
             scores = scores[topk_inds, :]
             dir_cls_score = dir_cls_score[topk_inds]
-            bboxes = self.bbox_coder.decode_torch(anchors, bbox_pred,
-                                                  self.target_means, self.target_stds)
+            bboxes = self.bbox_coder.decode(anchors, bbox_pred)
             mlvl_bboxes.append(bboxes)
             mlvl_scores.append(scores)
             mlvl_dir_scores.append(dir_cls_score)
 ...
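The net effect of this head refactor is that anchors and box encoding are driven by nested config dicts rather than flat keyword arguments. A config fragment built from the defaults shown above (only the refactored keys, not a complete head config):

# Fragment assembled from the defaults in the diff above; a full head config
# also needs num_classes, in_channels, losses, etc.
anchor3d_velo_head = dict(
    type='Anchor3DVeloHead',
    anchor_generator=dict(
        type='Anchor3DRangeGenerator',
        range=[0, -39.68, -1.78, 69.12, 39.68, -1.78],
        strides=[2],
        sizes=[[1.6, 3.9, 1.56]],
        rotations=[0, 1.57],
        custom_values=[0, 0],
        reshape_out=True),
    bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
    dir_offset=0,
    dir_limit_offset=1)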
mmdet3d/models/anchor_heads/second_head.py

-from __future__ import division
 import numpy as np
 import torch
 import torch.nn as nn
-from mmcv.cnn import normal_init
+from mmcv.cnn import bias_init_with_prob, normal_init

 from mmdet3d.core import (PseudoSampler, box_torch_ops,
                           boxes3d_to_bev_torch_lidar, build_anchor_generator,
 ...
@@ -12,24 +10,37 @@ from mmdet3d.core import (PseudoSampler, box_torch_ops,
 from mmdet3d.ops.iou3d.iou3d_utils import nms_gpu, nms_normal_gpu
 from mmdet.models import HEADS
 from ..builder import build_loss
-from ..utils import bias_init_with_prob
 from .train_mixins import AnchorTrainMixin


 @HEADS.register_module
 class SECONDHead(nn.Module, AnchorTrainMixin):
-    """Anchor-based head (RPN, RetinaNet, SSD, etc.).
+    """Anchor-based head for VoxelNet detectors.

     Args:
+        class_name (list[str]): name of classes (TODO: to be removed)
         in_channels (int): Number of channels in the input feature map.
+        train_cfg (dict): train configs
+        test_cfg (dict): test configs
         feat_channels (int): Number of channels of the feature map.
-        anchor_scales (Iterable): Anchor scales.
-        anchor_ratios (Iterable): Anchor aspect ratios.
-        anchor_strides (Iterable): Anchor strides.
-        anchor_base_sizes (Iterable): Anchor base sizes.
-        target_means (Iterable): Mean values of regression targets.
-        target_stds (Iterable): Std values of regression targets.
+        use_direction_classifier (bool): Whether to add a direction classifier.
+        encode_bg_as_zeros (bool): Whether to use sigmoid of softmax
+            (TODO: to be removed)
+        box_code_size (int): The size of box code.
+        anchor_generator(dict): Config dict of anchor generator.
+        assigner_per_size (bool): Whether to do assignment for each separate
+            anchor size.
+        assign_per_class (bool): Whether to do assignment for each class.
+        diff_rad_by_sin (bool): Whether to change the difference into sin
+            difference for box regression loss.
+        dir_offset (float | int): The offset of BEV rotation angles
+            (TODO: may be moved into box coder)
+        dirlimit_offset (float | int): The limited range of BEV rotation angles
+            (TODO: may be moved into box coder)
+        box_coder (dict): Config dict of box coders.
         loss_cls (dict): Config of classification loss.
         loss_bbox (dict): Config of localization loss.
+        loss_dir (dict): Config of direction classifier loss.
     """  # noqa: W605

     def __init__(self,
 ...
@@ -37,25 +48,24 @@ class SECONDHead(nn.Module, AnchorTrainMixin):
                  in_channels,
                  train_cfg,
                  test_cfg,
-                 cache_anchor=False,
                  feat_channels=256,
                  use_direction_classifier=True,
                  encode_bg_as_zeros=False,
                  box_code_size=7,
-                 anchor_generator=dict(type='AnchorGeneratorRange'),
-                 anchor_range=[0, -39.68, -1.78, 69.12, 39.68, -1.78],
-                 anchor_strides=[2],
-                 anchor_sizes=[[1.6, 3.9, 1.56]],
-                 anchor_rotations=[0, 1.57],
-                 anchor_custom_values=[],
+                 anchor_generator=dict(
+                     type='Anchor3DRangeGenerator',
+                     range=[0, -39.68, -1.78, 69.12, 39.68, -1.78],
+                     strides=[2],
+                     sizes=[[1.6, 3.9, 1.56]],
+                     rotations=[0, 1.57],
+                     custom_values=[],
+                     reshape_out=False),
                  assigner_per_size=False,
                  assign_per_class=False,
                  diff_rad_by_sin=True,
                  dir_offset=0,
                  dir_limit_offset=1,
-                 target_means=(.0, .0, .0, .0),
-                 target_stds=(1.0, 1.0, 1.0, 1.0),
-                 bbox_coder=dict(type='Residual3DBoxCoder'),
+                 bbox_coder=dict(type='DeltaXYZWLHRBBoxCoder'),
                  loss_cls=dict(
                      type='CrossEntropyLoss',
                      use_sigmoid=True,
 ...
@@ -94,29 +104,9 @@ class SECONDHead(nn.Module, AnchorTrainMixin):
         ]

         # build anchor generator
-        self.anchor_range = anchor_range
-        self.anchor_rotations = anchor_rotations
-        self.anchor_strides = anchor_strides
-        self.anchor_sizes = anchor_sizes
-        self.target_means = target_means
-        self.target_stds = target_stds
-        self.anchor_generators = []
+        self.anchor_generator = build_anchor_generator(anchor_generator)
         # In 3D detection, the anchor stride is connected with anchor size
-        self.num_anchors = (
-            len(self.anchor_rotations) * len(self.anchor_sizes))
-        # if len(self.anchor_sizes) != self.anchor_strides:
-        #     # this means different anchor in the same anchor strides
-        #     anchor_sizes = [self.anchor_sizes]
-        for anchor_stride in self.anchor_strides:
-            anchor_generator.update(
-                anchor_ranges=anchor_range,
-                sizes=self.anchor_sizes,
-                stride=anchor_stride,
-                rotations=anchor_rotations,
-                custom_values=anchor_custom_values,
-                cache_anchor=cache_anchor)
-            self.anchor_generators.append(
-                build_anchor_generator(anchor_generator))
+        self.num_anchors = self.anchor_generator.num_base_anchors

         self._init_layers()
         self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)
 ...
@@ -152,7 +142,7 @@ class SECONDHead(nn.Module, AnchorTrainMixin):
     def forward(self, feats):
         return multi_apply(self.forward_single, feats)

-    def get_anchors(self, featmap_sizes, input_metas):
+    def get_anchors(self, featmap_sizes, input_metas, device='cuda'):
         """Get anchors according to feature map sizes.

         Args:
             featmap_sizes (list[tuple]): Multi-level feature map sizes.
 ...
@@ -161,16 +151,10 @@ class SECONDHead(nn.Module, AnchorTrainMixin):
             tuple: anchors of each image, valid flags of each image
         """
         num_imgs = len(input_metas)
-        num_levels = len(featmap_sizes)
         # since feature map sizes of all images are the same, we only compute
         # anchors for one time
-        multi_level_anchors = []
-        for i in range(num_levels):
-            anchors = self.anchor_generators[i].grid_anchors(
-                featmap_sizes[i])
-            if not self.assigner_per_size:
-                anchors = anchors.reshape(-1, anchors.size(-1))
-            multi_level_anchors.append(anchors)
+        multi_level_anchors = self.anchor_generator.grid_anchors(
+            featmap_sizes, device=device)
         anchor_list = [multi_level_anchors for _ in range(num_imgs)]
         return anchor_list
 ...
@@ -237,16 +221,15 @@ class SECONDHead(nn.Module, AnchorTrainMixin):
              input_metas,
              gt_bboxes_ignore=None):
         featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]
-        assert len(featmap_sizes) == len(self.anchor_generators)
-        anchor_list = self.get_anchors(featmap_sizes, input_metas)
+        assert len(featmap_sizes) == self.anchor_generator.num_levels
+        device = cls_scores[0].device
+        anchor_list = self.get_anchors(featmap_sizes, input_metas, device=device)
         label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1
         cls_reg_targets = self.anchor_target_3d(
             anchor_list,
             gt_bboxes,
             input_metas,
-            self.target_means,
-            self.target_stds,
             gt_bboxes_ignore_list=gt_bboxes_ignore,
             gt_labels_list=gt_labels,
             num_classes=self.num_classes,
 ...
@@ -288,12 +271,14 @@ class SECONDHead(nn.Module, AnchorTrainMixin):
         assert len(cls_scores) == len(bbox_preds)
         assert len(cls_scores) == len(dir_cls_preds)
         num_levels = len(cls_scores)
+        featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]
+        device = cls_scores[0].device
+        mlvl_anchors = self.anchor_generator.grid_anchors(
+            featmap_sizes, device=device)
         mlvl_anchors = [
-            self.anchor_generators[i].grid_anchors(
-                cls_scores[i].size()[-2:]).reshape(-1, self.box_code_size)
-            for i in range(num_levels)
+            anchor.reshape(-1, self.box_code_size) for anchor in mlvl_anchors
         ]

         result_list = []
         for img_id in range(len(input_metas)):
             cls_score_list = [
 ...
@@ -353,9 +338,7 @@ class SECONDHead(nn.Module, AnchorTrainMixin):
             bbox_pred = bbox_pred[thr_inds]
             scores = scores[thr_inds]
             dir_cls_scores = dir_cls_score[thr_inds]
-            bboxes = self.bbox_coder.decode_torch(anchors, bbox_pred,
-                                                  self.target_means, self.target_stds)
+            bboxes = self.bbox_coder.decode(anchors, bbox_pred)
             bboxes_for_nms = boxes3d_to_bev_torch_lidar(bboxes)
             mlvl_bboxes_for_nms.append(bboxes_for_nms)
             mlvl_bboxes.append(bboxes)
 ...
@@ -383,6 +366,7 @@ class SECONDHead(nn.Module, AnchorTrainMixin):
             selected_scores = mlvl_scores[selected]
             selected_label_preds = mlvl_label_preds[selected]
             selected_dir_scores = mlvl_dir_scores[selected]
+            # TODO: move dir_offset to box coder
             dir_rot = box_torch_ops.limit_period(
                 selected_bboxes[..., -1] - self.dir_offset,
                 self.dir_limit_offset, np.pi)
 ...
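The same refactor in SECONDHead collapses the per-stride generator list into a single generator built from its config dict, queried on the prediction device. A minimal sketch of the new flow, with an illustrative feature-map size:

import torch
from mmdet3d.core import build_anchor_generator

# Config values copied from the defaults in the diff above.
anchor_generator = build_anchor_generator(
    dict(
        type='Anchor3DRangeGenerator',
        range=[0, -39.68, -1.78, 69.12, 39.68, -1.78],
        strides=[2],
        sizes=[[1.6, 3.9, 1.56]],
        rotations=[0, 1.57],
        custom_values=[],
        reshape_out=False))

featmap_sizes = [(200, 176)]  # illustrative single BEV level
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# As in the refactored get_anchors: one call returns anchors for every level
# directly on the requested device.
mlvl_anchors = anchor_generator.grid_anchors(featmap_sizes, device=device)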
mmdet3d/models/anchor_heads/train_mixins.py

@@ -11,8 +11,6 @@ class AnchorTrainMixin(object):
                          anchor_list,
                          gt_bboxes_list,
                          input_metas,
-                         target_means,
-                         target_stds,
                          gt_bboxes_ignore_list=None,
                          gt_labels_list=None,
                          label_channels=1,
 ...
@@ -24,8 +22,6 @@ class AnchorTrainMixin(object):
             anchor_list (list[list]): Multi level anchors of each image.
             gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.
             img_metas (list[dict]): Meta info of each image.
-            target_means (Iterable): Mean value of regression targets.
-            target_stds (Iterable): Std value of regression targets.

         Returns:
             tuple
 ...
@@ -57,8 +53,6 @@ class AnchorTrainMixin(object):
             gt_bboxes_ignore_list,
             gt_labels_list,
             input_metas,
-            target_means=target_means,
-            target_stds=target_stds,
             label_channels=label_channels,
             num_classes=num_classes,
             sampling=sampling)
 ...
@@ -89,8 +83,6 @@ class AnchorTrainMixin(object):
                              gt_bboxes_ignore,
                              gt_labels,
                              input_meta,
-                             target_means,
-                             target_stds,
                              label_channels=1,
                              num_classes=1,
                              sampling=True):
 ...
@@ -111,13 +103,12 @@ class AnchorTrainMixin(object):
                 anchor_targets = self.anchor_target_single_assigner(
                     assigner, current_anchors, gt_bboxes[gt_per_cls, :],
                     gt_bboxes_ignore, gt_labels[gt_per_cls], input_meta,
-                    target_means, target_stds, label_channels, num_classes,
+                    label_channels, num_classes,
                     sampling)
             else:
                 anchor_targets = self.anchor_target_single_assigner(
                     assigner, current_anchors, gt_bboxes, gt_bboxes_ignore,
-                    gt_labels, input_meta, target_means, target_stds,
+                    gt_labels, input_meta,
                     label_channels, num_classes,
                     sampling)
             (labels, label_weights, bbox_targets, bbox_weights,
              dir_targets, dir_weights, pos_inds, neg_inds) = anchor_targets
 ...
@@ -156,8 +147,7 @@ class AnchorTrainMixin(object):
         else:
             return self.anchor_target_single_assigner(
                 self.bbox_assigner, anchors, gt_bboxes, gt_bboxes_ignore,
-                gt_labels, input_meta, target_means, target_stds,
+                gt_labels, input_meta,
                 label_channels, num_classes, sampling)

     def anchor_target_single_assigner(self,
                                       bbox_assigner,
 ...
@@ -166,8 +156,6 @@ class AnchorTrainMixin(object):
                                       gt_bboxes_ignore,
                                       gt_labels,
                                       input_meta,
-                                      target_means,
-                                      target_stds,
                                       label_channels=1,
                                       num_classes=1,
                                       sampling=True):
 ...
@@ -188,18 +176,17 @@ class AnchorTrainMixin(object):
             neg_inds = sampling_result.neg_inds
         else:
             pos_inds = torch.nonzero(
-                anchors.new_zeros((anchors.shape[0], ), dtype=torch.long) > 0
+                anchors.new_zeros((anchors.shape[0], ), dtype=torch.bool) > 0
             ).squeeze(-1).unique()
             neg_inds = torch.nonzero(
-                anchors.new_zeros((anchors.shape[0], ), dtype=torch.long) ==
+                anchors.new_zeros((anchors.shape[0], ), dtype=torch.bool) ==
                 0).squeeze(-1).unique()

         if gt_labels is not None:
             labels += num_classes
         if len(pos_inds) > 0:
-            pos_bbox_targets = self.bbox_coder.encode_torch(
-                sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes,
-                target_means, target_stds)
+            pos_bbox_targets = self.bbox_coder.encode(
+                sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)
             pos_dir_targets = get_direction_target(
                 sampling_result.pos_bboxes,
                 pos_bbox_targets,
 ...
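target_means/target_stds disappear from the mixin because normalization now lives entirely inside the bbox coder's encode/decode pair. A toy one-dimensional coder makes that division of labour concrete; this is a simplification, not the real DeltaXYZWLHRBBoxCoder, which covers full 3D boxes.

import torch


class ToyDeltaCoder(object):
    """Toy residual coder over (x, width) pairs for illustration only."""

    def encode(self, anchors, gt_bboxes):
        # anchors / gt_bboxes: (N, 2) tensors of (x, width)
        xa, wa = anchors[:, 0], anchors[:, 1]
        xg, wg = gt_bboxes[:, 0], gt_bboxes[:, 1]
        dx = (xg - xa) / wa          # center offset, scaled by anchor width
        dw = torch.log(wg / wa)      # log-ratio of sizes
        return torch.stack([dx, dw], dim=-1)

    def decode(self, anchors, deltas):
        xa, wa = anchors[:, 0], anchors[:, 1]
        x = xa + deltas[:, 0] * wa
        w = wa * torch.exp(deltas[:, 1])
        return torch.stack([x, w], dim=-1)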
mmdet3d/models/builder.py

-from mmdet.models.builder import build
-from mmdet.models.registry import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
-                                   ROI_EXTRACTORS, SHARED_HEADS)
+from mmdet.models.builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,
+                                  ROI_EXTRACTORS, SHARED_HEADS, build)
 from .registry import FUSION_LAYERS, MIDDLE_ENCODERS, VOXEL_ENCODERS
 ...
mmdet3d/models/fusion_layers/point_fusion.py

@@ -235,9 +235,8 @@ class PointFusion(nn.Module):
 (only line wrapping changes in this hunk; the content is shown once)
             pts.new_tensor(img_meta['pcd_trans'])
             if 'pcd_trans' in img_meta.keys() else 0)
         pcd_rotate_mat = (
             pts.new_tensor(img_meta['pcd_rotation'])
             if 'pcd_rotation' in img_meta.keys() else
             torch.eye(3).type_as(pts).to(pts.device))
         img_scale_factor = (
             img_meta['scale_factor']
             if 'scale_factor' in img_meta.keys() else 1)
 ...
mmdet3d/models/registry.py

-from mmdet.utils import Registry
+from mmcv.utils import Registry

 VOXEL_ENCODERS = Registry('voxel_encoder')
 MIDDLE_ENCODERS = Registry('middle_encoder')
 ...
mmdet3d/models/utils/__init__.py  (deleted; previous contents below)

from mmdet.models.utils import ResLayer, bias_init_with_prob

__all__ = ['bias_init_with_prob', 'ResLayer']
mmdet3d/models/utils/weight_init.py  (deleted; previous contents below)

import numpy as np
import torch.nn as nn


def xavier_init(module, gain=1, bias=0, distribution='normal'):
    assert distribution in ['uniform', 'normal']
    if distribution == 'uniform':
        nn.init.xavier_uniform_(module.weight, gain=gain)
    else:
        nn.init.xavier_normal_(module.weight, gain=gain)
    if hasattr(module, 'bias'):
        nn.init.constant_(module.bias, bias)


def normal_init(module, mean=0, std=1, bias=0):
    nn.init.normal_(module.weight, mean, std)
    if hasattr(module, 'bias'):
        nn.init.constant_(module.bias, bias)


def uniform_init(module, a=0, b=1, bias=0):
    nn.init.uniform_(module.weight, a, b)
    if hasattr(module, 'bias'):
        nn.init.constant_(module.bias, bias)


def kaiming_init(module,
                 mode='fan_out',
                 nonlinearity='relu',
                 bias=0,
                 distribution='normal'):
    assert distribution in ['uniform', 'normal']
    if distribution == 'uniform':
        nn.init.kaiming_uniform_(
            module.weight, mode=mode, nonlinearity=nonlinearity)
    else:
        nn.init.kaiming_normal_(
            module.weight, mode=mode, nonlinearity=nonlinearity)
    if hasattr(module, 'bias'):
        nn.init.constant_(module.bias, bias)


def bias_init_with_prob(prior_prob):
    """ initialize conv/fc bias value according to giving probablity"""
    bias_init = float(-np.log((1 - prior_prob) / prior_prob))
    return bias_init
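The helpers deleted here are the ones now imported from mmcv.cnn elsewhere in this commit. As a quick check of what bias_init_with_prob computes: it solves sigmoid(b) = prior_prob for b, so a classifier initialized with that bias starts out predicting the desired foreground prior.

import math
from mmcv.cnn import bias_init_with_prob

b = bias_init_with_prob(0.01)          # -log((1 - 0.01) / 0.01), about -4.595
assert abs(1 / (1 + math.exp(-b)) - 0.01) < 1e-9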
mmdet3d/ops/iou3d/src/iou3d.cpp

(Aside from the macro and tensor-accessor changes marked below, this file's diff is a formatting pass: include order, brace placement and line wrapping change while the logic stays the same. Reflowed-only regions are shown once.)

-#include <torch/serialize/tensor.h>
-#include <torch/extension.h>
-#include <vector>
 #include <cuda.h>
 #include <cuda_runtime_api.h>
+#include <torch/extension.h>
+#include <torch/serialize/tensor.h>
+#include <vector>

-#define CHECK_CUDA(x) AT_CHECK(x.type().is_cuda(), #x, " must be a CUDAtensor ")
-#define CHECK_CONTIGUOUS(x) AT_CHECK(x.is_contiguous(), #x, " must be contiguous ")
-#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x)
-#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
-#define CHECK_ERROR(ans) { gpuAssert((ans), __FILE__, __LINE__); }
+#define CHECK_CUDA(x) \
+  TORCH_CHECK(x.device().is_cuda(), #x, " must be a CUDAtensor ")
+#define CHECK_CONTIGUOUS(x) \
+  TORCH_CHECK(x.is_contiguous(), #x, " must be contiguous ")
+#define CHECK_INPUT(x) \
+  CHECK_CUDA(x);       \
+  CHECK_CONTIGUOUS(x)
+#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
+#define CHECK_ERROR(ans) \
+  { gpuAssert((ans), __FILE__, __LINE__); }

 inline void gpuAssert(cudaError_t code, const char *file, int line,
                       bool abort = true) {
   if (code != cudaSuccess) {
     fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file,
             line);
     if (abort) exit(code);
   }
 }

 const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;

 void boxesoverlapLauncher(const int num_a, const float *boxes_a,
                           const int num_b, const float *boxes_b,
                           float *ans_overlap);
 void boxesioubevLauncher(const int num_a, const float *boxes_a,
                          const int num_b, const float *boxes_b,
                          float *ans_iou);
 void nmsLauncher(const float *boxes, unsigned long long *mask, int boxes_num,
                  float nms_overlap_thresh);
 void nmsNormalLauncher(const float *boxes, unsigned long long *mask,
                        int boxes_num, float nms_overlap_thresh);

 int boxes_overlap_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b,
                           at::Tensor ans_overlap) {
   // params boxes_a: (N, 5) [x1, y1, x2, y2, ry]
   // params boxes_b: (M, 5)
   // params ans_overlap: (N, M)
   CHECK_INPUT(boxes_a);
   CHECK_INPUT(boxes_b);
   CHECK_INPUT(ans_overlap);

   int num_a = boxes_a.size(0);
   int num_b = boxes_b.size(0);

-  const float *boxes_a_data = boxes_a.data<float>();
-  const float *boxes_b_data = boxes_b.data<float>();
-  float *ans_overlap_data = ans_overlap.data<float>();
+  const float *boxes_a_data = boxes_a.data_ptr<float>();
+  const float *boxes_b_data = boxes_b.data_ptr<float>();
+  float *ans_overlap_data = ans_overlap.data_ptr<float>();

   boxesoverlapLauncher(num_a, boxes_a_data, num_b, boxes_b_data,
                        ans_overlap_data);

   return 1;
 }

 int boxes_iou_bev_gpu(at::Tensor boxes_a, at::Tensor boxes_b,
                       at::Tensor ans_iou) {
   // params boxes_a: (N, 5) [x1, y1, x2, y2, ry]
   // params boxes_b: (M, 5)
   // params ans_overlap: (N, M)
   CHECK_INPUT(boxes_a);
   CHECK_INPUT(boxes_b);
   CHECK_INPUT(ans_iou);

   int num_a = boxes_a.size(0);
   int num_b = boxes_b.size(0);

-  const float *boxes_a_data = boxes_a.data<float>();
-  const float *boxes_b_data = boxes_b.data<float>();
-  float *ans_iou_data = ans_iou.data<float>();
+  const float *boxes_a_data = boxes_a.data_ptr<float>();
+  const float *boxes_b_data = boxes_b.data_ptr<float>();
+  float *ans_iou_data = ans_iou.data_ptr<float>();

   boxesioubevLauncher(num_a, boxes_a_data, num_b, boxes_b_data, ans_iou_data);

   return 1;
 }

 int nms_gpu(at::Tensor boxes, at::Tensor keep, float nms_overlap_thresh) {
   // params boxes: (N, 5) [x1, y1, x2, y2, ry]
   // params keep: (N)
   CHECK_INPUT(boxes);
   CHECK_CONTIGUOUS(keep);
   int boxes_num = boxes.size(0);
-  const float *boxes_data = boxes.data<float>();
-  long *keep_data = keep.data<long>();
+  const float *boxes_data = boxes.data_ptr<float>();
+  long *keep_data = keep.data_ptr<long>();

   const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);

   unsigned long long *mask_data = NULL;
   CHECK_ERROR(cudaMalloc((void **)&mask_data,
                          boxes_num * col_blocks * sizeof(unsigned long long)));
   nmsLauncher(boxes_data, mask_data, boxes_num, nms_overlap_thresh);

   // unsigned long long mask_cpu[boxes_num * col_blocks];
   // unsigned long long *mask_cpu = new unsigned long long [boxes_num *
   // col_blocks];
   std::vector<unsigned long long> mask_cpu(boxes_num * col_blocks);

   // printf("boxes_num=%d, col_blocks=%d\n", boxes_num, col_blocks);
   CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data,
                          boxes_num * col_blocks * sizeof(unsigned long long),
                          cudaMemcpyDeviceToHost));

   cudaFree(mask_data);

   unsigned long long remv_cpu[col_blocks];
   memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long));

   int num_to_keep = 0;

   for (int i = 0; i < boxes_num; i++) {
     int nblock = i / THREADS_PER_BLOCK_NMS;
     int inblock = i % THREADS_PER_BLOCK_NMS;

     if (!(remv_cpu[nblock] & (1ULL << inblock))) {
       keep_data[num_to_keep++] = i;
       unsigned long long *p = &mask_cpu[0] + i * col_blocks;
       for (int j = nblock; j < col_blocks; j++) {
         remv_cpu[j] |= p[j];
       }
     }
   }
   if (cudaSuccess != cudaGetLastError()) printf("Error!\n");

   return num_to_keep;
 }

 int nms_normal_gpu(at::Tensor boxes, at::Tensor keep,
                    float nms_overlap_thresh) {
   // params boxes: (N, 5) [x1, y1, x2, y2, ry]
   // params keep: (N)
   CHECK_INPUT(boxes);
   CHECK_CONTIGUOUS(keep);
   int boxes_num = boxes.size(0);
-  const float *boxes_data = boxes.data<float>();
-  long *keep_data = keep.data<long>();
+  const float *boxes_data = boxes.data_ptr<float>();
+  long *keep_data = keep.data_ptr<long>();

   const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);

   unsigned long long *mask_data = NULL;
   CHECK_ERROR(cudaMalloc((void **)&mask_data,
                          boxes_num * col_blocks * sizeof(unsigned long long)));
   nmsNormalLauncher(boxes_data, mask_data, boxes_num, nms_overlap_thresh);

   // unsigned long long mask_cpu[boxes_num * col_blocks];
   // unsigned long long *mask_cpu = new unsigned long long [boxes_num *
   // col_blocks];
   std::vector<unsigned long long> mask_cpu(boxes_num * col_blocks);

   // printf("boxes_num=%d, col_blocks=%d\n", boxes_num, col_blocks);
   CHECK_ERROR(cudaMemcpy(&mask_cpu[0], mask_data,
                          boxes_num * col_blocks * sizeof(unsigned long long),
                          cudaMemcpyDeviceToHost));

   cudaFree(mask_data);

   unsigned long long remv_cpu[col_blocks];
   memset(remv_cpu, 0, col_blocks * sizeof(unsigned long long));

   int num_to_keep = 0;
   for (int i = 0; i < boxes_num; i++) {
     int nblock = i / THREADS_PER_BLOCK_NMS;
     int inblock = i % THREADS_PER_BLOCK_NMS;

     if (!(remv_cpu[nblock] & (1ULL << inblock))) {
       keep_data[num_to_keep++] = i;
       unsigned long long *p = &mask_cpu[0] + i * col_blocks;
       for (int j = nblock; j < col_blocks; j++) {
         remv_cpu[j] |= p[j];
       }
     }
   }
   if (cudaSuccess != cudaGetLastError()) printf("Error!\n");

   return num_to_keep;
 }

 PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
   m.def("boxes_overlap_bev_gpu", &boxes_overlap_bev_gpu,
         "oriented boxes overlap");
   m.def("boxes_iou_bev_gpu", &boxes_iou_bev_gpu, "oriented boxes iou");
   m.def("nms_gpu", &nms_gpu, "oriented nms gpu");
   m.def("nms_normal_gpu", &nms_normal_gpu, "nms gpu");
 ...
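For readers of the nms_gpu / nms_normal_gpu bodies above, the CPU-side loop is easier to follow outside C++. A Python re-expression of the same bookkeeping (an illustrative helper, not part of the extension): mask[i * col_blocks + j] has bit k set when box i overlaps box (j * 64 + k) above the NMS threshold, and a box is kept only if no earlier kept box has marked it.

def select_from_mask(mask, boxes_num, threads_per_block=64):
    """Replay the suppression bitmask produced by the CUDA kernel."""
    col_blocks = (boxes_num + threads_per_block - 1) // threads_per_block
    remv = [0] * col_blocks       # accumulated suppression bits
    keep = []
    for i in range(boxes_num):
        nblock, inblock = divmod(i, threads_per_block)
        if not (remv[nblock] & (1 << inblock)):
            keep.append(i)
            # Fold box i's row of the mask into the running suppression state.
            row = mask[i * col_blocks:(i + 1) * col_blocks]
            for j in range(nblock, col_blocks):
                remv[j] |= row[j]
    return keep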