OpenDAS / mmdetection3d
"vscode:/vscode.git/clone" did not exist on "35c4b5ec16b5fac3564f1ab98543ac46be769f9a"
Commit 8acd5d54
authored Jun 15, 2020 by zhangwenwei
parent f93167c3

Tune performance with new schedule

Changes: 27 files in the full commit. Showing 7 changed files with 33 additions and 276 deletions (+33, -276) on this page.
mmdet3d/datasets/kitti_dataset.py                      +11  -2
mmdet3d/datasets/loader/__init__.py                     +0  -4
mmdet3d/datasets/loader/build_loader.py                 +0  -57
mmdet3d/datasets/loader/sampler.py                      +0  -164
mmdet3d/datasets/utils.py                               +0  -37
mmdet3d/models/roi_heads/part_aggregation_roi_head.py  +21  -11
tests/test_heads.py                                     +1  -1
mmdet3d/datasets/kitti_dataset.py (view file @ 8acd5d54)

@@ -11,7 +11,6 @@ from mmcv.utils import print_log
 from mmdet.datasets import DATASETS
 from ..core.bbox import Box3DMode, CameraInstance3DBoxes
 from .custom_3d import Custom3DDataset
-from .utils import remove_dontcare


 @DATASETS.register_module()
@@ -83,7 +82,7 @@ class KittiDataset(Custom3DDataset):
         annos = info['annos']
         # we need other objects to avoid collision when sample
-        annos = remove_dontcare(annos)
+        annos = self.remove_dontcare(annos)
         loc = annos['location']
         dims = annos['dimensions']
         rots = annos['rotation_y']
@@ -128,6 +127,16 @@ class KittiDataset(Custom3DDataset):
         inds = np.array(inds, dtype=np.int64)
         return inds

+    def remove_dontcare(self, ann_info):
+        img_filtered_annotations = {}
+        relevant_annotation_indices = [
+            i for i, x in enumerate(ann_info['name']) if x != 'DontCare'
+        ]
+        for key in ann_info.keys():
+            img_filtered_annotations[key] = (
+                ann_info[key][relevant_annotation_indices])
+        return img_filtered_annotations
+
     def format_results(self,
                        outputs,
                        pklfile_prefix=None,
                        ...
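For context, the new remove_dontcare method drops every annotation row whose class name is 'DontCare', indexing each per-object field by the surviving indices. A minimal, self-contained sketch of the same logic on a toy annotation dict (the field values are made up for illustration):

import numpy as np

def remove_dontcare(ann_info):
    # same filtering logic as the new KittiDataset.remove_dontcare method
    keep = [i for i, x in enumerate(ann_info['name']) if x != 'DontCare']
    return {key: ann_info[key][keep] for key in ann_info}

ann_info = {
    'name': np.array(['Car', 'DontCare', 'Pedestrian']),
    'rotation_y': np.array([0.1, 0.0, -1.2]),
}
print(remove_dontcare(ann_info)['name'])  # ['Car' 'Pedestrian']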
mmdet3d/datasets/loader/__init__.py (deleted, 100644 → 0; view file @ f93167c3)

from .build_loader import build_dataloader
from .sampler import DistributedGroupSampler, GroupSampler

__all__ = ['GroupSampler', 'DistributedGroupSampler', 'build_dataloader']
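With the loader package deleted, callers presumably switch to the equivalent utilities in mmdet (an assumption based on this deletion; the replacement imports are not shown on this page):

# presumed replacement import, not part of this diff
from mmdet.datasets import build_dataloader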
mmdet3d/datasets/loader/build_loader.py (deleted, 100644 → 0; view file @ f93167c3)

import platform
import random
from functools import partial

import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from torch.utils.data import DataLoader

from .sampler import DistributedGroupSampler, DistributedSampler, GroupSampler

if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))


def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     seed=None,
                     **kwargs):
    shuffle = kwargs.get('shuffle', True)
    if dist:
        rank, world_size = get_dist_info()
        if shuffle:
            sampler = DistributedGroupSampler(dataset, samples_per_gpu,
                                              world_size, rank)
        else:
            sampler = DistributedSampler(
                dataset, world_size, rank, shuffle=False)
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=False,
        worker_init_fn=worker_init_fn if seed is not None else None,
        **kwargs)

    return data_loader


def worker_init_fn(seed):
    np.random.seed(seed)
    random.seed(seed)
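For reference, a minimal sketch of how the deleted builder was called. The ToyDataset below is hypothetical; the only structural requirement the group samplers impose is a per-sample flag array on the dataset:

import numpy as np
from torch.utils.data import Dataset

class ToyDataset(Dataset):
    # hypothetical dataset exposing the `flag` attribute GroupSampler asserts on
    def __init__(self, n=10):
        self.flag = np.zeros(n, dtype=np.uint8)  # all samples in one group

    def __len__(self):
        return len(self.flag)

    def __getitem__(self, idx):
        return {'idx': idx}

loader = build_dataloader(
    ToyDataset(), samples_per_gpu=2, workers_per_gpu=0, dist=False)
for batch in loader:  # each batch holds samples_per_gpu samples from one group
    pass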
mmdet3d/datasets/loader/sampler.py (deleted, 100644 → 0; view file @ f93167c3)

from __future__ import division

import math

import numpy as np
import torch
from mmcv.runner import get_dist_info
from torch.utils.data import DistributedSampler as _DistributedSampler
from torch.utils.data import Sampler


class DistributedSampler(_DistributedSampler):

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
        self.shuffle = shuffle

    def __iter__(self):
        # deterministically shuffle based on epoch
        if self.shuffle:
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()

        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # subsample
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)


class GroupSampler(Sampler):

    def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        for i, size in enumerate(self.group_sizes):
            self.num_samples += int(np.ceil(
                size / self.samples_per_gpu)) * self.samples_per_gpu

    def __iter__(self):
        indices = []
        for i, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            indice = np.where(self.flag == i)[0]
            assert len(indice) == size
            np.random.shuffle(indice)
            num_extra = int(np.ceil(size / self.samples_per_gpu)
                            ) * self.samples_per_gpu - len(indice)
            indice = np.concatenate(
                [indice, np.random.choice(indice, num_extra)])
            indices.append(indice)
        indices = np.concatenate(indices)
        indices = [
            indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
            for i in np.random.permutation(
                range(len(indices) // self.samples_per_gpu))
        ]
        indices = np.concatenate(indices)
        indices = indices.astype(np.int64).tolist()
        assert len(indices) == self.num_samples
        return iter(indices)

    def __len__(self):
        return self.num_samples


class DistributedGroupSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
    """

    def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None):
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0

        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)

        self.num_samples = 0
        for i, j in enumerate(self.group_sizes):
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)

        indices = []
        for i, size in enumerate(self.group_sizes):
            if size > 0:
                indice = np.where(self.flag == i)[0]
                assert len(indice) == size
                indice = indice[list(
                    torch.randperm(int(size), generator=g))].tolist()
                extra = int(
                    math.ceil(size * 1.0 / self.samples_per_gpu /
                              self.num_replicas)
                ) * self.samples_per_gpu * self.num_replicas - len(indice)
                # pad indice
                tmp = indice.copy()
                for _ in range(extra // size):
                    indice.extend(tmp)
                indice.extend(tmp[:extra % size])
                indices.extend(indice)

        assert len(indices) == self.total_size

        indices = [
            indices[j] for i in list(
                torch.randperm(
                    len(indices) // self.samples_per_gpu, generator=g))
            for j in range(i * self.samples_per_gpu, (i + 1) *
                           self.samples_per_gpu)
        ]

        # subsample
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
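Both group samplers pad each group up to a multiple of samples_per_gpu so that every batch is drawn from a single group (in 2D detection the flag typically encodes image aspect ratio). A small sketch of that padding arithmetic, with made-up group sizes:

import numpy as np

samples_per_gpu = 4
group_sizes = np.array([10, 7])  # sizes of two sample groups (illustrative)
padded = [int(np.ceil(s / samples_per_gpu)) * samples_per_gpu
          for s in group_sizes]
print(padded)       # [12, 8]: each group padded with repeated, reshuffled indices
print(sum(padded))  # 20 == GroupSampler.num_samples for these groups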
mmdet3d/datasets/utils.py (deleted, 100644 → 0; view file @ f93167c3)

from collections import Sequence

import mmcv
import numpy as np
import torch


def remove_dontcare(image_anno):
    img_filtered_annotations = {}
    relevant_annotation_indices = [
        i for i, x in enumerate(image_anno['name']) if x != 'DontCare'
    ]
    for key in image_anno.keys():
        img_filtered_annotations[key] = (
            image_anno[key][relevant_annotation_indices])
    return img_filtered_annotations


def to_tensor(data):
    # TODO: remove this duplicated method in the future
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.
    """
    if isinstance(data, torch.Tensor):
        return data
    elif isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    elif isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    elif isinstance(data, int):
        return torch.LongTensor([data])
    elif isinstance(data, float):
        return torch.FloatTensor([data])
    else:
        raise TypeError('type {} cannot be converted to tensor.'.format(
            type(data)))
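Assuming the definitions above are in scope, the deleted helper behaved as follows (a behavior sketch; per its TODO it duplicated mmdet's to_tensor):

print(to_tensor(np.array([1, 2])))  # tensor([1, 2])
print(to_tensor([1.0, 2.0]))        # tensor([1., 2.])
print(to_tensor(3))                 # tensor([3])
# to_tensor('abc') raises TypeError: str is a Sequence, but mmcv.is_str excludes it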
mmdet3d/models/roi_heads/part_aggregation_roi_head.py (view file @ 8acd5d54)

@@ -68,8 +68,14 @@ class PartAggregationROIHead(Base3DRoIHead):
             voxels_dict (dict): Contains information of voxels.
             img_metas (list[dict]): Meta info of each image.
             proposal_list (list[dict]): Proposal information from rpn.
-            gt_bboxes_3d (list[FloatTensor]): GT bboxes of each batch.
-            gt_labels_3d (list[LongTensor]): GT labels of each batch.
+                The dictionary should contain the following keys:
+                - boxes_3d (:obj:`BaseInstance3DBoxes`): Proposal bboxes
+                - labels_3d (torch.Tensor): Labels of proposals
+                - cls_preds (torch.Tensor): Original scores of proposals
+            gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]):
+                GT bboxes of each sample. The bboxes are encapsulated
+                by 3D box structures.
+            gt_labels_3d (list[LongTensor]): GT labels of each sample.

         Returns:
             dict: losses from each head.
@@ -178,17 +184,19 @@ class PartAggregationROIHead(Base3DRoIHead):
             cur_gt_labels = gt_labels_3d[batch_idx]

             batch_num_gts = 0
-            batch_gt_indis = cur_gt_labels.new_full((cur_boxes.shape[0], ),
-                                                    0)  # 0 is bg
-            batch_max_overlaps = cur_boxes.new_zeros(cur_boxes.shape[0])
-            batch_gt_labels = cur_gt_labels.new_full((cur_boxes.shape[0], ),
-                                                     -1)  # -1 is bg
-            if isinstance(self.bbox_assigner, list):  # for multi classes
+            # 0 is bg
+            batch_gt_indis = cur_gt_labels.new_full((len(cur_boxes), ), 0)
+            batch_max_overlaps = cur_boxes.tensor.new_zeros(len(cur_boxes))
+            # -1 is bg
+            batch_gt_labels = cur_gt_labels.new_full((len(cur_boxes), ), -1)
+
+            # each class may have its own assigner
+            if isinstance(self.bbox_assigner, list):
                 for i, assigner in enumerate(self.bbox_assigner):
                     gt_per_cls = (cur_gt_labels == i)
                     pred_per_cls = (cur_labels_3d == i)
                     cur_assign_res = assigner.assign(
-                        cur_boxes[pred_per_cls],
+                        cur_boxes.tensor[pred_per_cls],
                         cur_gt_bboxes.tensor[gt_per_cls],
                         gt_labels=cur_gt_labels[gt_per_cls])
                     # gather assign_results in different class into one result
@@ -215,10 +223,12 @@ class PartAggregationROIHead(Base3DRoIHead):
                                              batch_gt_labels)
             else:  # for single class
                 assign_result = self.bbox_assigner.assign(
-                    cur_boxes, cur_gt_bboxes.tensor, gt_labels=cur_gt_labels)
+                    cur_boxes.tensor,
+                    cur_gt_bboxes.tensor,
+                    gt_labels=cur_gt_labels)
             # sample boxes
             sampling_result = self.bbox_sampler.sample(assign_result,
-                                                       cur_boxes,
+                                                       cur_boxes.tensor,
                                                        cur_gt_bboxes.tensor,
                                                        cur_gt_labels)
             sampling_results.append(sampling_result)
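The pattern behind these changes: proposals (cur_boxes) are now carried as 3D box structures rather than raw tensors, so size queries use len(...) and the assigner/sampler receive the underlying .tensor. A toy stand-in for that wrapper pattern (a hypothetical minimal class, not the real BaseInstance3DBoxes):

import torch

class Boxes3D:
    # minimal stand-in for a 3D box structure wrapping an (N, 7) tensor
    def __init__(self, tensor):
        self.tensor = tensor

    def __len__(self):
        return self.tensor.shape[0]

boxes = Boxes3D(torch.zeros(5, 7))
print(len(boxes), boxes.tensor.shape)  # 5 torch.Size([5, 7])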
tests/test_heads.py (view file @ 8acd5d54)

@@ -154,7 +154,7 @@ def test_parta2_rpnhead_getboxes():
     if not torch.cuda.is_available():
         pytest.skip('test requires GPU and torch+cuda')
     rpn_head_cfg, proposal_cfg = _get_rpn_head_cfg(
-        'kitti/hv_PartA2_secfpn_4x8_cyclic_80e_kitti-3d-3class.py')
+        'kitti/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class.py')
     from mmdet3d.models.builder import build_head
     self = build_head(rpn_head_cfg)
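The config rename from 4x8 to 2x8 presumably reflects the commit's new schedule: in OpenMMLab config names, NxM conventionally denotes N samples per GPU on M GPUs.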
(The remaining changed files of this 27-file commit are on page 2 of the commit view and are not shown here.)