Skip to content
GitLab
Menu
Projects
Groups
Snippets
Loading...
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Menu
Open sidebar
OpenDAS
mmdetection3d
Commits
4412595a
Commit
4412595a
authored
Jun 15, 2020
by
wuyuefeng
Committed by
zhangwenwei
Jun 15, 2020
Browse files
Some tools
parent
289971b2
Changes
3
Show whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
273 additions
and
1 deletion
+273
-1
.isort.cfg
.isort.cfg
+1
-1
tools/analyze_logs.py
tools/analyze_logs.py
+179
-0
tools/benchmark.py
tools/benchmark.py
+93
-0
No files found.
.isort.cfg
View file @
4412595a
...
@@ -3,6 +3,6 @@ line_length = 79
...
@@ -3,6 +3,6 @@ line_length = 79
multi_line_output = 0
multi_line_output = 0
known_standard_library = setuptools
known_standard_library = setuptools
known_first_party = mmdet,mmdet3d
known_first_party = mmdet,mmdet3d
known_third_party = cv2,mmcv,numba,numpy,nuscenes,pycocotools,pyquaternion,pytest,scipy,shapely,six,skimage,terminaltables,torch,torchvision
known_third_party = cv2,
matplotlib,
mmcv,numba,numpy,nuscenes,pycocotools,pyquaternion,pytest,scipy,
seaborn,
shapely,six,skimage,terminaltables,torch,torchvision
no_lines_before = STDLIB,LOCALFOLDER
no_lines_before = STDLIB,LOCALFOLDER
default_section = THIRDPARTY
default_section = THIRDPARTY
tools/analyze_logs.py
0 → 100644
View file @
4412595a
import
argparse
import
json
from
collections
import
defaultdict
import
matplotlib.pyplot
as
plt
import
numpy
as
np
import
seaborn
as
sns
def cal_train_time(log_dicts, args):
    """Print per-epoch iteration-time statistics for every parsed log.

    For each log dict (epoch -> metrics) this reports the slowest and
    fastest epoch, the std of the per-epoch mean times and the overall
    average iteration time.
    """
    for idx, log_dict in enumerate(log_dicts):
        print(f'{"-" * 5}Analyze train time of {args.json_logs[idx]}{"-" * 5}')
        # The first iteration of every epoch is usually an outlier
        # (data loading warm-up); drop it unless explicitly kept.
        start = 0 if args.include_outliers else 1
        times = np.array(
            [log_dict[epoch]['time'][start:] for epoch in log_dict])
        per_epoch_mean = times.mean(-1)
        slowest = per_epoch_mean.argmax()
        fastest = per_epoch_mean.argmin()
        print(f'slowest epoch {slowest + 1}, '
              f'average time is {per_epoch_mean[slowest]:.4f}')
        print(f'fastest epoch {fastest + 1}, '
              f'average time is {per_epoch_mean[fastest]:.4f}')
        print(f'time std over epochs is {per_epoch_mean.std():.4f}')
        print(f'average iter time: {np.mean(times):.4f} s/iter')
        print()
def plot_curve(log_dicts, args):
    """Plot the requested metric curves from the parsed training logs.

    Draws one curve per (json_log, metric) pair on the current pyplot
    figure, then either shows it interactively or saves it to ``args.out``.
    mAP-style metrics are plotted per epoch; everything else per iteration.
    """
    if args.backend is not None:
        plt.switch_backend(args.backend)
    sns.set_style(args.style)
    # If no legend was given, use "{json_log}_{metric}" for every curve.
    legend = args.legend
    if legend is None:
        legend = []
        for json_log in args.json_logs:
            for metric in args.keys:
                legend.append(f'{json_log}_{metric}')
    # One legend entry is required per (log, metric) pair.
    assert len(legend) == (len(args.json_logs) * len(args.keys))
    metrics = args.keys

    num_metrics = len(metrics)
    for i, log_dict in enumerate(log_dicts):
        epochs = list(log_dict.keys())
        for j, metric in enumerate(metrics):
            print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
            # Only the first epoch is checked; assumes all epochs log the
            # same set of metrics.
            if metric not in log_dict[epochs[0]]:
                raise KeyError(
                    f'{args.json_logs[i]} does not contain metric {metric}')

            if 'mAP' in metric:
                # Evaluation metrics are logged once per epoch: the x axis
                # is the epoch index.
                # NOTE(review): assumes one mAP value per epoch starting at
                # epoch 1 with no gaps, otherwise xs and ys lengths differ
                # — confirm against the logging format.
                xs = np.arange(1, max(epochs) + 1)
                ys = []
                for epoch in epochs:
                    ys += log_dict[epoch][metric]
                ax = plt.gca()
                ax.set_xticks(xs)
                plt.xlabel('epoch')
                plt.plot(xs, ys, label=legend[i * num_metrics + j],
                         marker='o')
            else:
                # Training metrics are logged per iteration: build a global
                # iteration axis across epochs.
                xs = []
                ys = []
                # Last logged iter of the first epoch is taken as the
                # number of iterations per epoch.
                num_iters_per_epoch = log_dict[epochs[0]]['iter'][-1]
                for epoch in epochs:
                    iters = log_dict[epoch]['iter']
                    # A trailing 'val' record reuses the last iter value;
                    # drop it so xs stays strictly increasing.
                    if log_dict[epoch]['mode'][-1] == 'val':
                        iters = iters[:-1]
                    xs.append(
                        np.array(iters) + (epoch - 1) * num_iters_per_epoch)
                    ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
                xs = np.concatenate(xs)
                ys = np.concatenate(ys)
                plt.xlabel('iter')
                plt.plot(xs, ys, label=legend[i * num_metrics + j],
                         linewidth=0.5)
            plt.legend()
        if args.title is not None:
            plt.title(args.title)
    if args.out is None:
        plt.show()
    else:
        print(f'save curve to: {args.out}')
        plt.savefig(args.out)
        plt.cla()
def add_plot_parser(subparsers):
    """Register the ``plot_curve`` sub-command and its options."""
    plot_parser = subparsers.add_parser(
        'plot_curve', help='parser for plotting curves')
    # One or more training logs; each contributes one curve per metric.
    plot_parser.add_argument(
        'json_logs',
        type=str,
        nargs='+',
        help='path of train log in json format')
    plot_parser.add_argument(
        '--keys',
        type=str,
        nargs='+',
        default=['mAP_0.25'],
        help='the metric that you want to plot')
    plot_parser.add_argument('--title', type=str, help='title of figure')
    plot_parser.add_argument(
        '--legend',
        type=str,
        nargs='+',
        default=None,
        help='legend of each plot')
    plot_parser.add_argument(
        '--backend', type=str, default=None, help='backend of plt')
    plot_parser.add_argument(
        '--style', type=str, default='dark', help='style of plt')
    # Output image path; when omitted the figure is shown interactively.
    plot_parser.add_argument('--out', type=str, default=None)
def add_time_parser(subparsers):
    """Register the ``cal_train_time`` sub-command and its options."""
    time_parser = subparsers.add_parser(
        'cal_train_time',
        help='parser for computing the average time per training iteration')
    time_parser.add_argument(
        'json_logs',
        type=str,
        nargs='+',
        help='path of train log in json format')
    # Keep the (normally discarded) first iteration of every epoch.
    time_parser.add_argument(
        '--include-outliers',
        action='store_true',
        help='include the first value of every epoch when computing '
        'the average time')
def parse_args():
    """Build the CLI parser and return the parsed command-line arguments."""
    arg_parser = argparse.ArgumentParser(description='Analyze Json Log')
    # Currently only two tasks are supported: plotting curves and
    # computing the average train time.
    task_parsers = arg_parser.add_subparsers(dest='task', help='task parser')
    add_plot_parser(task_parsers)
    add_time_parser(task_parsers)
    return arg_parser.parse_args()
def load_json_logs(json_logs):
    """Parse json log files into per-epoch metric dicts.

    Returns one dict per input file, mapping epoch number to a sub-dict
    whose keys are metric names (e.g. memory, bbox_mAP) and whose values
    are the lists of values recorded across all iterations of that epoch.
    """
    all_logs = []
    for path in json_logs:
        per_epoch = {}
        with open(path, 'r') as log_file:
            for raw_line in log_file:
                record = json.loads(raw_line.strip())
                # Records without an `epoch` field are skipped.
                if 'epoch' not in record:
                    continue
                epoch = record.pop('epoch')
                bucket = per_epoch.setdefault(epoch, defaultdict(list))
                for key, value in record.items():
                    bucket[key].append(value)
        all_logs.append(per_epoch)
    return all_logs
def main():
    """Entry point: parse CLI args, load the logs and run the chosen task."""
    args = parse_args()

    json_logs = args.json_logs
    for json_log in json_logs:
        # Raise instead of `assert`: asserts are stripped under `python -O`.
        if not json_log.endswith('.json'):
            raise ValueError(f'{json_log} is not a json log file')

    log_dicts = load_json_logs(json_logs)

    # Dispatch through an explicit table instead of `eval(args.task)`:
    # eval() on a CLI-supplied string can execute arbitrary code.
    task_funcs = {
        'cal_train_time': cal_train_time,
        'plot_curve': plot_curve,
    }
    task_funcs[args.task](log_dicts, args)


if __name__ == '__main__':
    main()
tools/benchmark.py
0 → 100644
View file @
4412595a
import
argparse
import
time
import
torch
from
mmcv
import
Config
from
mmcv.parallel
import
MMDataParallel
from
mmcv.runner
import
load_checkpoint
from
tools.fuse_conv_bn
import
fuse_module
from
mmdet.core
import
wrap_fp16_model
from
mmdet.datasets
import
build_dataloader
,
build_dataset
from
mmdet.models
import
build_detector
def parse_args():
    """Parse command-line arguments for the benchmark script."""
    parser = argparse.ArgumentParser(description='MMDet benchmark a model')
    parser.add_argument('config', help='test config file path')
    parser.add_argument('checkpoint', help='checkpoint file')
    # `type=int` is required: without it a value passed on the command line
    # stays a str, and `(i + 1) % args.log_interval` in main() raises
    # TypeError.
    parser.add_argument(
        '--log-interval', type=int, default=50, help='interval of logging')
    parser.add_argument(
        '--fuse-conv-bn',
        action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase'
        'the inference speed')
    args = parser.parse_args()
    return args
def main():
    """Benchmark single-image inference speed of a detector.

    Builds the test dataloader and the model from the config, loads the
    checkpoint, then times forward passes over at most 2000 images and
    prints the running and overall fps (warm-up iterations excluded).
    """
    args = parse_args()

    cfg = Config.fromfile(args.config)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # build the dataloader
    # TODO: support multiple images per gpu (only minor changes are needed)
    dataset = build_dataset(cfg.data.test)
    data_loader = build_dataloader(
        dataset,
        samples_per_gpu=1,
        workers_per_gpu=cfg.data.workers_per_gpu,
        dist=False,
        shuffle=False)

    # build the model and load checkpoint
    model = build_detector(cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is not None:
        wrap_fp16_model(model)
    load_checkpoint(model, args.checkpoint, map_location='cpu')
    if args.fuse_conv_bn:
        model = fuse_module(model)

    model = MMDataParallel(model, device_ids=[0])

    model.eval()

    # the first several iterations may be very slow so skip them
    num_warmup = 5
    pure_inf_time = 0

    # benchmark with 2000 image and take the average
    for i, data in enumerate(data_loader):

        # Synchronize around the forward pass so perf_counter measures the
        # actual GPU work, not just the async kernel launches.
        torch.cuda.synchronize()
        start_time = time.perf_counter()

        with torch.no_grad():
            model(return_loss=False, rescale=True, **data)

        torch.cuda.synchronize()
        elapsed = time.perf_counter() - start_time

        if i >= num_warmup:
            pure_inf_time += elapsed
            if (i + 1) % args.log_interval == 0:
                fps = (i + 1 - num_warmup) / pure_inf_time
                print(f'Done image [{i + 1:<3}/ 2000], fps: {fps:.1f} img / s')

        if (i + 1) == 2000:
            # Bug fix: `elapsed` was previously added to `pure_inf_time` a
            # second time here (it is already accumulated in the
            # `i >= num_warmup` branch above), which inflated the
            # denominator and under-reported the overall fps.
            fps = (i + 1 - num_warmup) / pure_inf_time
            print(f'Overall fps: {fps:.1f} img / s')
            break


if __name__ == '__main__':
    main()
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
.
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment