OpenDAS / mmdetection3d · Commits

Commit 0b758185, authored Jun 15, 2020 by liyinhao

    merge master

Parents: 0b882582, 788f9b3e
Changes: 30 files in total. This page shows 10 changed files with 306 additions and 285 deletions (+306, -285); the remaining files are on page 2 of the diff.
mmdet3d/datasets/kitti2d_dataset.py (+1, -9)
mmdet3d/datasets/kitti_dataset.py (+11, -2)
mmdet3d/datasets/loader/__init__.py (+0, -4)
mmdet3d/datasets/loader/build_loader.py (+0, -57)
mmdet3d/datasets/loader/sampler.py (+0, -164)
mmdet3d/datasets/utils.py (+0, -37)
mmdet3d/models/roi_heads/part_aggregation_roi_head.py (+21, -11)
tests/test_heads.py (+1, -1)
tools/analyze_logs.py (+179, -0)
tools/benchmark.py (+93, -0)
mmdet3d/datasets/kitti2d_dataset.py (view file @ 0b758185)

@@ -53,7 +53,7 @@ class Kitti2DDataset(CustomDataset):
         self.data_infos = mmcv.load(ann_file)
         self.cat2label = {
             cat_name: i
-            for i, cat_name in enumerate(self.class_names)
+            for i, cat_name in enumerate(self.CLASSES)
         }
         return self.data_infos

@@ -107,14 +107,6 @@ class Kitti2DDataset(CustomDataset):
         self.pre_pipeline(results)
         return self.pipeline(results)

-    def _set_group_flag(self):
-        """Set flag according to image aspect ratio.
-
-        Images with aspect ratio greater than 1 will be set as group 1,
-        otherwise group 0. In kitti's pcd, they are all the same, thus are
-        all zeros.
-        """
-        self.flag = np.zeros(len(self), dtype=np.uint8)
-
     def drop_arrays_by_name(self, gt_names, used_classes):
         inds = [i for i, x in enumerate(gt_names) if x not in used_classes]
         inds = np.array(inds, dtype=np.int64)
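The one-line fix above switches the comprehension to CLASSES, the attribute that mmdet's CustomDataset actually defines; class_names appears not to be set on this class. A minimal sketch of the mapping the fixed comprehension builds, assuming the usual three KITTI classes:

CLASSES = ('Pedestrian', 'Cyclist', 'Car')
cat2label = {cat_name: i for i, cat_name in enumerate(CLASSES)}
# -> {'Pedestrian': 0, 'Cyclist': 1, 'Car': 2}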
mmdet3d/datasets/kitti_dataset.py (view file @ 0b758185)

@@ -11,7 +11,6 @@ from mmcv.utils import print_log
 from mmdet.datasets import DATASETS
 from ..core.bbox import Box3DMode, CameraInstance3DBoxes
 from .custom_3d import Custom3DDataset
-from .utils import remove_dontcare


 @DATASETS.register_module()

@@ -83,7 +82,7 @@ class KittiDataset(Custom3DDataset):
         annos = info['annos']
         # we need other objects to avoid collision when sample
-        annos = remove_dontcare(annos)
+        annos = self.remove_dontcare(annos)
         loc = annos['location']
         dims = annos['dimensions']
         rots = annos['rotation_y']

@@ -128,6 +127,16 @@ class KittiDataset(Custom3DDataset):
         inds = np.array(inds, dtype=np.int64)
         return inds

+    def remove_dontcare(self, ann_info):
+        img_filtered_annotations = {}
+        relevant_annotation_indices = [
+            i for i, x in enumerate(ann_info['name']) if x != 'DontCare'
+        ]
+        for key in ann_info.keys():
+            img_filtered_annotations[key] = (
+                ann_info[key][relevant_annotation_indices])
+        return img_filtered_annotations
+
     def format_results(self,
                        outputs,
                        pklfile_prefix=None,
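For concreteness, the new KittiDataset.remove_dontcare keeps only the annotation indices whose name is not 'DontCare' and filters every per-box field by those indices. A self-contained sketch with made-up annotation values:

import numpy as np

ann_info = {
    'name': np.array(['Car', 'DontCare', 'Pedestrian']),
    'location': np.zeros((3, 3)),
    'rotation_y': np.array([0.1, 0.0, -0.3]),
}
keep = [i for i, x in enumerate(ann_info['name']) if x != 'DontCare']
filtered = {key: ann_info[key][keep] for key in ann_info}
# filtered['name'] -> ['Car', 'Pedestrian']; every other field shrinks in step.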
mmdet3d/datasets/loader/__init__.py (deleted, 100644 → 0; view file @ 0b882582)

from .build_loader import build_dataloader
from .sampler import DistributedGroupSampler, GroupSampler

__all__ = ['GroupSampler', 'DistributedGroupSampler', 'build_dataloader']
mmdet3d/datasets/loader/build_loader.py (deleted, 100644 → 0; view file @ 0b882582)

import platform
import random
from functools import partial

import numpy as np
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from torch.utils.data import DataLoader

from .sampler import DistributedGroupSampler, DistributedSampler, GroupSampler

if platform.system() != 'Windows':
    # https://github.com/pytorch/pytorch/issues/973
    import resource
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))


def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     seed=None,
                     **kwargs):
    shuffle = kwargs.get('shuffle', True)
    if dist:
        rank, world_size = get_dist_info()
        if shuffle:
            sampler = DistributedGroupSampler(dataset, samples_per_gpu,
                                              world_size, rank)
        else:
            sampler = DistributedSampler(
                dataset, world_size, rank, shuffle=False)
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = GroupSampler(dataset, samples_per_gpu) if shuffle else None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=False,
        worker_init_fn=worker_init_fn if seed is not None else None,
        **kwargs)

    return data_loader


def worker_init_fn(seed):
    np.random.seed(seed)
    random.seed(seed)
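The three deleted loader files duplicated utilities that also live in mmdet, and after this merge callers go through mmdet's implementation instead, as the new tools/benchmark.py below does. A minimal usage sketch of the surviving entry point (the config path is hypothetical):

from mmcv import Config
from mmdet.datasets import build_dataloader, build_dataset

cfg = Config.fromfile(
    'configs/kitti/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class.py')
dataset = build_dataset(cfg.data.train)
data_loader = build_dataloader(
    dataset,            # grouping relies on the dataset's `flag` attribute
    samples_per_gpu=2,
    workers_per_gpu=2,
    num_gpus=1,
    dist=False,
    seed=42)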
mmdet3d/datasets/loader/sampler.py (deleted, 100644 → 0; view file @ 0b882582)

from __future__ import division

import math

import numpy as np
import torch
from mmcv.runner import get_dist_info
from torch.utils.data import DistributedSampler as _DistributedSampler
from torch.utils.data import Sampler


class DistributedSampler(_DistributedSampler):

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
        self.shuffle = shuffle

    def __iter__(self):
        # deterministically shuffle based on epoch
        if self.shuffle:
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()

        # add extra samples to make it evenly divisible
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # subsample
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples

        return iter(indices)


class GroupSampler(Sampler):

    def __init__(self, dataset, samples_per_gpu=1):
        assert hasattr(dataset, 'flag')
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.flag = dataset.flag.astype(np.int64)
        self.group_sizes = np.bincount(self.flag)
        self.num_samples = 0
        for i, size in enumerate(self.group_sizes):
            self.num_samples += int(np.ceil(
                size / self.samples_per_gpu)) * self.samples_per_gpu

    def __iter__(self):
        indices = []
        for i, size in enumerate(self.group_sizes):
            if size == 0:
                continue
            indice = np.where(self.flag == i)[0]
            assert len(indice) == size
            np.random.shuffle(indice)
            num_extra = int(np.ceil(size / self.samples_per_gpu)
                            ) * self.samples_per_gpu - len(indice)
            indice = np.concatenate(
                [indice, np.random.choice(indice, num_extra)])
            indices.append(indice)
        indices = np.concatenate(indices)
        indices = [
            indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]
            for i in np.random.permutation(
                range(len(indices) // self.samples_per_gpu))
        ]
        indices = np.concatenate(indices)
        indices = indices.astype(np.int64).tolist()
        assert len(indices) == self.num_samples
        return iter(indices)

    def __len__(self):
        return self.num_samples


class DistributedGroupSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.

    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.

    .. note::
        Dataset is assumed to be of constant size.

    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
    """

    def __init__(self,
                 dataset,
                 samples_per_gpu=1,
                 num_replicas=None,
                 rank=None):
        _rank, _num_replicas = get_dist_info()
        if num_replicas is None:
            num_replicas = _num_replicas
        if rank is None:
            rank = _rank
        self.dataset = dataset
        self.samples_per_gpu = samples_per_gpu
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0

        assert hasattr(self.dataset, 'flag')
        self.flag = self.dataset.flag
        self.group_sizes = np.bincount(self.flag)

        self.num_samples = 0
        for i, j in enumerate(self.group_sizes):
            self.num_samples += int(
                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /
                          self.num_replicas)) * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas

    def __iter__(self):
        # deterministically shuffle based on epoch
        g = torch.Generator()
        g.manual_seed(self.epoch)

        indices = []
        for i, size in enumerate(self.group_sizes):
            if size > 0:
                indice = np.where(self.flag == i)[0]
                assert len(indice) == size
                indice = indice[list(
                    torch.randperm(int(size), generator=g))].tolist()
                extra = int(
                    math.ceil(size * 1.0 / self.samples_per_gpu /
                              self.num_replicas)
                ) * self.samples_per_gpu * self.num_replicas - len(indice)
                # pad indice
                tmp = indice.copy()
                for _ in range(extra // size):
                    indice.extend(tmp)
                indice.extend(tmp[:extra % size])
                indices.extend(indice)

        assert len(indices) == self.total_size

        indices = [
            indices[j] for i in list(
                torch.randperm(
                    len(indices) // self.samples_per_gpu, generator=g))
            for j in range(i * self.samples_per_gpu, (i + 1) *
                           self.samples_per_gpu)
        ]

        # subsample
        offset = self.num_samples * self.rank
        indices = indices[offset:offset + self.num_samples]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
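Both group samplers round every group up to a multiple of samples_per_gpu, so each GPU batch is drawn from a single group (e.g. one image aspect-ratio bucket). A quick sanity check on that arithmetic, with made-up group sizes:

import numpy as np

samples_per_gpu = 4
group_sizes = np.array([10, 3])  # e.g. two aspect-ratio groups
num_samples = sum(
    int(np.ceil(size / samples_per_gpu)) * samples_per_gpu
    for size in group_sizes)
# ceil(10/4) * 4 + ceil(3/4) * 4 = 12 + 4 = 16 indices per epoch;
# each group's shortfall is padded by re-sampling its own indices.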
mmdet3d/datasets/utils.py (deleted, 100644 → 0; view file @ 0b882582)

from collections import Sequence

import mmcv
import numpy as np
import torch


def remove_dontcare(image_anno):
    img_filtered_annotations = {}
    relevant_annotation_indices = [
        i for i, x in enumerate(image_anno['name']) if x != 'DontCare'
    ]
    for key in image_anno.keys():
        img_filtered_annotations[key] = (
            image_anno[key][relevant_annotation_indices])
    return img_filtered_annotations


def to_tensor(data):
    # TODO: remove this duplicated method in the future
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.
    """
    if isinstance(data, torch.Tensor):
        return data
    elif isinstance(data, np.ndarray):
        return torch.from_numpy(data)
    elif isinstance(data, Sequence) and not mmcv.is_str(data):
        return torch.tensor(data)
    elif isinstance(data, int):
        return torch.LongTensor([data])
    elif isinstance(data, float):
        return torch.FloatTensor([data])
    else:
        raise TypeError('type {} cannot be converted to tensor.'.format(
            type(data)))
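The TODO above flagged to_tensor as a duplicate, which is presumably why the whole module could be dropped once remove_dontcare moved into KittiDataset. A small usage sketch of the surviving copy, assuming mmdet 2.x exposes it from mmdet.datasets.pipelines:

import numpy as np
from mmdet.datasets.pipelines import to_tensor

to_tensor(np.array([1.0, 2.0]))  # ndarray -> torch.from_numpy(...)
to_tensor([1, 2, 3])             # sequence -> torch.tensor([1, 2, 3])
to_tensor(5)                     # int -> torch.LongTensor([5])
to_tensor(2.5)                   # float -> torch.FloatTensor([2.5])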
mmdet3d/models/roi_heads/part_aggregation_roi_head.py (view file @ 0b758185)

@@ -68,8 +68,14 @@ class PartAggregationROIHead(Base3DRoIHead):
             voxels_dict (dict): Contains information of voxels.
             img_metas (list[dict]): Meta info of each image.
             proposal_list (list[dict]): Proposal information from rpn.
-            gt_bboxes_3d (list[FloatTensor]): GT bboxes of each batch.
-            gt_labels_3d (list[LongTensor]): GT labels of each batch.
+                The dictionary should contain the following keys:
+
+                - boxes_3d (:obj:`BaseInstance3DBoxes`): Proposal bboxes
+                - labels_3d (torch.Tensor): Labels of proposals
+                - cls_preds (torch.Tensor): Original scores of proposals
+            gt_bboxes_3d (list[:obj:`BaseInstance3DBoxes`]):
+                GT bboxes of each sample. The bboxes are encapsulated
+                by 3D box structures.
+            gt_labels_3d (list[LongTensor]): GT labels of each sample.

         Returns:
             dict: losses from each head.

@@ -178,17 +184,19 @@ class PartAggregationROIHead(Base3DRoIHead):
             cur_gt_labels = gt_labels_3d[batch_idx]

             batch_num_gts = 0
-            batch_gt_indis = cur_gt_labels.new_full((cur_boxes.shape[0], ),
-                                                    0)  # 0 is bg
-            batch_max_overlaps = cur_boxes.new_zeros(cur_boxes.shape[0])
-            batch_gt_labels = cur_gt_labels.new_full((cur_boxes.shape[0], ),
-                                                     -1)  # -1 is bg
-            if isinstance(self.bbox_assigner, list):  # for multi classes
+            # 0 is bg
+            batch_gt_indis = cur_gt_labels.new_full((len(cur_boxes), ), 0)
+            batch_max_overlaps = cur_boxes.tensor.new_zeros(len(cur_boxes))
+            # -1 is bg
+            batch_gt_labels = cur_gt_labels.new_full((len(cur_boxes), ), -1)
+
+            # each class may have its own assigner
+            if isinstance(self.bbox_assigner, list):
                 for i, assigner in enumerate(self.bbox_assigner):
                     gt_per_cls = (cur_gt_labels == i)
                     pred_per_cls = (cur_labels_3d == i)
                     cur_assign_res = assigner.assign(
-                        cur_boxes[pred_per_cls],
+                        cur_boxes.tensor[pred_per_cls],
                         cur_gt_bboxes.tensor[gt_per_cls],
                         gt_labels=cur_gt_labels[gt_per_cls])
                     # gather assign_results in different class into one result

@@ -215,10 +223,12 @@ class PartAggregationROIHead(Base3DRoIHead):
                                       batch_gt_labels)
             else:  # for single class
                 assign_result = self.bbox_assigner.assign(
-                    cur_boxes, cur_gt_bboxes.tensor, gt_labels=cur_gt_labels)
+                    cur_boxes.tensor,
+                    cur_gt_bboxes.tensor,
+                    gt_labels=cur_gt_labels)
             # sample boxes
             sampling_result = self.bbox_sampler.sample(assign_result,
-                                                       cur_boxes,
+                                                       cur_boxes.tensor,
                                                        cur_gt_bboxes.tensor,
                                                        cur_gt_labels)
             sampling_results.append(sampling_result)
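The recurring cur_boxes → cur_boxes.tensor edits follow from the docstring change above: proposals now arrive as 3D box structures rather than raw tensors, while the assigners and samplers still operate on plain tensors, so the underlying data is pulled out explicitly. A minimal sketch of that accessor, assuming LiDARInstance3DBoxes (any BaseInstance3DBoxes subclass behaves the same):

import torch
from mmdet3d.core.bbox import LiDARInstance3DBoxes

boxes = LiDARInstance3DBoxes(torch.rand(4, 7))  # 7 values per box: center, size, yaw
assert isinstance(boxes.tensor, torch.Tensor)
assert len(boxes) == boxes.tensor.shape[0] == 4  # hence len(cur_boxes) above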
tests/test_heads.py (view file @ 0b758185)

@@ -154,7 +154,7 @@ def test_parta2_rpnhead_getboxes():
     if not torch.cuda.is_available():
         pytest.skip('test requires GPU and torch+cuda')
     rpn_head_cfg, proposal_cfg = _get_rpn_head_cfg(
-        'kitti/hv_PartA2_secfpn_4x8_cyclic_80e_kitti-3d-3class.py')
+        'kitti/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class.py')
     from mmdet3d.models.builder import build_head
     self = build_head(rpn_head_cfg)
tools/analyze_logs.py (new file, 0 → 100644; view file @ 0b758185)

import argparse
import json
from collections import defaultdict

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns


def cal_train_time(log_dicts, args):
    for i, log_dict in enumerate(log_dicts):
        print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}')
        all_times = []
        for epoch in log_dict.keys():
            if args.include_outliers:
                all_times.append(log_dict[epoch]['time'])
            else:
                all_times.append(log_dict[epoch]['time'][1:])
        all_times = np.array(all_times)
        epoch_ave_time = all_times.mean(-1)
        slowest_epoch = epoch_ave_time.argmax()
        fastest_epoch = epoch_ave_time.argmin()
        std_over_epoch = epoch_ave_time.std()
        print(f'slowest epoch {slowest_epoch + 1}, '
              f'average time is {epoch_ave_time[slowest_epoch]:.4f}')
        print(f'fastest epoch {fastest_epoch + 1}, '
              f'average time is {epoch_ave_time[fastest_epoch]:.4f}')
        print(f'time std over epochs is {std_over_epoch:.4f}')
        print(f'average iter time: {np.mean(all_times):.4f} s/iter')
        print()


def plot_curve(log_dicts, args):
    if args.backend is not None:
        plt.switch_backend(args.backend)
    sns.set_style(args.style)
    # if legend is None, use {filename}_{key} as legend
    legend = args.legend
    if legend is None:
        legend = []
        for json_log in args.json_logs:
            for metric in args.keys:
                legend.append(f'{json_log}_{metric}')
    assert len(legend) == (len(args.json_logs) * len(args.keys))
    metrics = args.keys

    num_metrics = len(metrics)
    for i, log_dict in enumerate(log_dicts):
        epochs = list(log_dict.keys())
        for j, metric in enumerate(metrics):
            print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
            if metric not in log_dict[epochs[0]]:
                raise KeyError(
                    f'{args.json_logs[i]} does not contain metric {metric}')

            if 'mAP' in metric:
                xs = np.arange(1, max(epochs) + 1)
                ys = []
                for epoch in epochs:
                    ys += log_dict[epoch][metric]
                ax = plt.gca()
                ax.set_xticks(xs)
                plt.xlabel('epoch')
                plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')
            else:
                xs = []
                ys = []
                num_iters_per_epoch = log_dict[epochs[0]]['iter'][-1]
                for epoch in epochs:
                    iters = log_dict[epoch]['iter']
                    if log_dict[epoch]['mode'][-1] == 'val':
                        iters = iters[:-1]
                    xs.append(
                        np.array(iters) + (epoch - 1) * num_iters_per_epoch)
                    ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
                xs = np.concatenate(xs)
                ys = np.concatenate(ys)
                plt.xlabel('iter')
                plt.plot(
                    xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)
            plt.legend()
        if args.title is not None:
            plt.title(args.title)
    if args.out is None:
        plt.show()
    else:
        print(f'save curve to: {args.out}')
        plt.savefig(args.out)
        plt.cla()


def add_plot_parser(subparsers):
    parser_plt = subparsers.add_parser(
        'plot_curve', help='parser for plotting curves')
    parser_plt.add_argument(
        'json_logs',
        type=str,
        nargs='+',
        help='path of train log in json format')
    parser_plt.add_argument(
        '--keys',
        type=str,
        nargs='+',
        default=['mAP_0.25'],
        help='the metric that you want to plot')
    parser_plt.add_argument('--title', type=str, help='title of figure')
    parser_plt.add_argument(
        '--legend',
        type=str,
        nargs='+',
        default=None,
        help='legend of each plot')
    parser_plt.add_argument(
        '--backend', type=str, default=None, help='backend of plt')
    parser_plt.add_argument(
        '--style', type=str, default='dark', help='style of plt')
    parser_plt.add_argument('--out', type=str, default=None)


def add_time_parser(subparsers):
    parser_time = subparsers.add_parser(
        'cal_train_time',
        help='parser for computing the average time per training iteration')
    parser_time.add_argument(
        'json_logs',
        type=str,
        nargs='+',
        help='path of train log in json format')
    parser_time.add_argument(
        '--include-outliers',
        action='store_true',
        help='include the first value of every epoch when computing '
        'the average time')


def parse_args():
    parser = argparse.ArgumentParser(description='Analyze Json Log')
    # currently only support plot curve and calculate average train time
    subparsers = parser.add_subparsers(dest='task', help='task parser')
    add_plot_parser(subparsers)
    add_time_parser(subparsers)
    args = parser.parse_args()
    return args


def load_json_logs(json_logs):
    # load and convert json_logs to log_dict, key is epoch, value is a sub dict
    # keys of sub dict is different metrics, e.g. memory, bbox_mAP
    # value of sub dict is a list of corresponding values of all iterations
    log_dicts = [dict() for _ in json_logs]
    for json_log, log_dict in zip(json_logs, log_dicts):
        with open(json_log, 'r') as log_file:
            for line in log_file:
                log = json.loads(line.strip())
                # skip lines without `epoch` field
                if 'epoch' not in log:
                    continue
                epoch = log.pop('epoch')
                if epoch not in log_dict:
                    log_dict[epoch] = defaultdict(list)
                for k, v in log.items():
                    log_dict[epoch][k].append(v)
    return log_dicts


def main():
    args = parse_args()

    json_logs = args.json_logs
    for json_log in json_logs:
        assert json_log.endswith('.json')

    log_dicts = load_json_logs(json_logs)

    eval(args.task)(log_dicts, args)


if __name__ == '__main__':
    main()
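load_json_logs expects the JSON log written during training (one JSON object per line, keyed by mode/epoch/iter plus the logged metrics). A made-up sample line and two typical invocations, for illustration (paths are hypothetical):

{"mode": "train", "epoch": 1, "iter": 50, "lr": 0.003, "time": 0.42, "loss": 1.87}

python tools/analyze_logs.py cal_train_time work_dirs/parta2/20200615.log.json
python tools/analyze_logs.py plot_curve work_dirs/parta2/20200615.log.json --keys loss --out losses.png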
tools/benchmark.py (new file, 0 → 100644; view file @ 0b758185)

import argparse
import time

import torch
from mmcv import Config
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint
from tools.fuse_conv_bn import fuse_module

from mmdet.core import wrap_fp16_model
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector


def parse_args():
    parser = argparse.ArgumentParser(description='MMDet benchmark a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    parser.add_argument(
        '--log-interval', default=50, help='interval of logging')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    args = parser.parse_args()
    return args


def main():
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)

    model = MMDataParallel(model, device_ids=[0])

    model.eval()

    # the first several iterations may be very slow so skip them
    num_warmup = 5
    pure_inf_time = 0

    # benchmark with 2000 image and take the average
    for i, data in enumerate(data_loader):

        torch.cuda.synchronize()
        start_time = time.perf_counter()

        with torch.no_grad():
            model(return_loss=False, rescale=True, **data)

        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time

        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % args.log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(f'Done image [{i + 1:<3}/ 2000], fps: {fps:.1f} img / s')

        if (i + 1) == 2000:
            pure_inf_time += elapsed
            fps = (i + 1 - num_warmup) / pure_inf_time
            print(f'Overall fps: {fps:.1f} img / s')
            break


if __name__ == '__main__':
    main()
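A typical run, with hypothetical config and checkpoint paths (the script times single-GPU inference over the first 2000 test samples after 5 warm-up iterations and reports img/s):

python tools/benchmark.py configs/kitti/hv_PartA2_secfpn_2x8_cyclic_80e_kitti-3d-3class.py work_dirs/parta2/latest.pth --fuse-conv-bn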