tsoc / superbenchmark

Commit 5e9f948d (unverified), authored May 31, 2021 by Yifan Xiong, committed by GitHub on May 31, 2021.

Executor - Save benchmark results to file (#86)

* Save benchmark results to json file.

Parent commit: 18398fba
Showing 2 changed files with 102 additions and 3 deletions:

  superbench/executor/executor.py  +48 -3
  tests/executor/test_executor.py  +54 -0

superbench/executor/executor.py
@@ -3,6 +3,8 @@
 """SuperBench Executor."""

+import json
+import itertools
 from pathlib import Path

 from omegaconf import ListConfig
@@ -94,6 +96,9 @@ def __exec_benchmark(self, context, log_suffix):
         Args:
             context (BenchmarkContext): Benchmark context to launch.
             log_suffix (str): Log string suffix.
+
+        Return:
+            dict: Benchmark results.
         """
         try:
             benchmark = BenchmarkRegistry.launch_benchmark(context)
@@ -106,10 +111,39 @@ def __exec_benchmark(self, context, log_suffix):
                     logger.info('Executor succeeded in %s.', log_suffix)
                 else:
                     logger.error('Executor failed in %s.', log_suffix)
+                return json.loads(benchmark.serialized_result)
             else:
                 logger.error('Executor failed in %s, invalid context.', log_suffix)
-        except Exception:
+        except Exception as e:
+            logger.error(e)
             logger.error('Executor failed in %s.', log_suffix)
+        return None
+
+    def __create_benchmark_dir(self, benchmark_name):
+        """Create output directory for benchmark.
+
+        Args:
+            benchmark_name (str): Benchmark name.
+        """
+        benchmark_output_dir = Path(self._output_dir, 'benchmarks', benchmark_name)
+        if benchmark_output_dir.is_dir() and any(benchmark_output_dir.iterdir()):
+            logger.warn('Benchmark output directory %s is not empty.', str(benchmark_output_dir))
+            for i in itertools.count(start=1):
+                backup_dir = benchmark_output_dir.with_name('{}.{}'.format(benchmark_name, i))
+                if not backup_dir.is_dir():
+                    benchmark_output_dir.rename(backup_dir)
+                    break
+        benchmark_output_dir.mkdir(mode=0o755, parents=True, exist_ok=True)
+
+    def __write_benchmark_results(self, benchmark_name, benchmark_results):
+        """Write benchmark results.
+
+        Args:
+            benchmark_name (str): Benchmark name.
+            benchmark_results (dict): Benchmark results.
+        """
+        with Path(self._output_dir, 'benchmarks', benchmark_name, 'results.json').open(mode='w') as f:
+            json.dump(benchmark_results, f, indent=2)
+
     def exec(self):
         """Run the SuperBench benchmarks locally."""
@@ -117,6 +151,8 @@ def exec(self):
             if benchmark_name not in self._sb_enabled:
                 continue
             benchmark_config = self._sb_benchmarks[benchmark_name]
+            benchmark_results = {}
+            self.__create_benchmark_dir(benchmark_name)
             for framework in benchmark_config.frameworks or [Framework.NONE]:
                 if benchmark_name.endswith('_models'):
                     for model in benchmark_config.models:
@@ -128,7 +164,11 @@ def exec(self):
                             framework=Framework(framework.lower()),
                             parameters=self.__get_arguments(benchmark_config.parameters)
                         )
-                        self.__exec_benchmark(context, log_suffix)
+                        result = self.__exec_benchmark(context, log_suffix)
+                        if framework != Framework.NONE:
+                            benchmark_results['{}/{}'.format(framework, model)] = result
+                        else:
+                            benchmark_results[model] = result
                 else:
                     log_suffix = 'micro-benchmark {}'.format(benchmark_name)
                     logger.info('Executor is going to execute %s.', log_suffix)
@@ -138,4 +178,9 @@ def exec(self):
                         framework=Framework(framework.lower()),
                         parameters=self.__get_arguments(benchmark_config.parameters)
                     )
-                    self.__exec_benchmark(context, log_suffix)
+                    result = self.__exec_benchmark(context, log_suffix)
+                    if framework != Framework.NONE:
+                        benchmark_results[framework] = result
+                    else:
+                        benchmark_results = result
+            self.__write_benchmark_results(benchmark_name, benchmark_results)
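Taken together, exec() now leaves one results.json per enabled benchmark under <output_dir>/benchmarks/<benchmark_name>/. A minimal sketch of collecting them after a run; the outputs/ path here is an assumption, not something fixed by this commit:

    import json
    from pathlib import Path

    output_dir = Path('outputs')  # hypothetical; whatever was passed as the executor's output dir
    all_results = {}
    for results_file in output_dir.glob('benchmarks/*/results.json'):
        with results_file.open() as f:
            all_results[results_file.parent.name] = json.load(f)  # keyed by benchmark name
    print(sorted(all_results))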
tests/executor/test_executor.py

@@ -3,10 +3,12 @@
 """SuperBench Executor test."""

+import json
 import unittest
 import shutil
 import tempfile
 from pathlib import Path
+from unittest import mock

 from omegaconf import OmegaConf
@@ -74,7 +76,59 @@ def test_get_arguments(self):
             ), expected_bert_models_args
         )

+    def test_create_benchmark_dir(self):
+        """Test __create_benchmark_dir."""
+        foo_path = Path(self.output_dir, 'benchmarks', 'foo')
+        self.executor._SuperBenchExecutor__create_benchmark_dir('foo')
+        self.assertTrue(foo_path.is_dir())
+        self.assertFalse(any(foo_path.iterdir()))
+
+        (foo_path / 'bar.txt').touch()
+        self.executor._SuperBenchExecutor__create_benchmark_dir('foo')
+        self.assertTrue(foo_path.is_dir())
+        self.assertFalse(any(foo_path.iterdir()))
+        self.assertFalse((foo_path / 'bar.txt').is_file())
+        self.assertTrue(foo_path.with_name('foo.1').is_dir())
+        self.assertTrue((foo_path.with_name('foo.1') / 'bar.txt').is_file())
+
+        (foo_path / 'bar.json').touch()
+        self.executor._SuperBenchExecutor__create_benchmark_dir('foo')
+        self.assertTrue(foo_path.is_dir())
+        self.assertFalse(any(foo_path.iterdir()))
+        self.assertFalse((foo_path / 'bar.json').is_file())
+        self.assertTrue(foo_path.with_name('foo.2').is_dir())
+        self.assertTrue((foo_path.with_name('foo.2') / 'bar.json').is_file())
+
+    def test_write_benchmark_results(self):
+        """Test __write_benchmark_results."""
+        foobar_path = Path(self.output_dir, 'benchmarks', 'foobar')
+        foobar_results_path = foobar_path / 'results.json'
+        self.executor._SuperBenchExecutor__create_benchmark_dir('foobar')
+        foobar_results = {
+            'sum': 1,
+            'avg': 1.1,
+        }
+        self.executor._SuperBenchExecutor__write_benchmark_results('foobar', foobar_results)
+        self.assertTrue(foobar_results_path.is_file())
+        with foobar_results_path.open(mode='r') as f:
+            self.assertDictEqual(json.load(f), foobar_results)
+
     def test_exec_empty_benchmarks(self):
         """Test execute empty benchmarks, nothing should happen."""
         self.executor._sb_enabled = []
         self.executor.exec()

+    @mock.patch('superbench.executor.SuperBenchExecutor._SuperBenchExecutor__exec_benchmark')
+    def test_exec_default_benchmarks(self, mock_exec_benchmark):
+        """Test execute default benchmarks, mock exec function.
+
+        Args:
+            mock_exec_benchmark (function): Mocked __exec_benchmark function.
+        """
+        mock_exec_benchmark.return_value = {}
+        self.executor.exec()
+        self.assertTrue(Path(self.output_dir, 'benchmarks').is_dir())
+        for benchmark_name in self.executor._sb_benchmarks:
+            self.assertTrue(Path(self.output_dir, 'benchmarks', benchmark_name).is_dir())
+            self.assertTrue(Path(self.output_dir, 'benchmarks', benchmark_name, 'results.json').is_file())
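The tests reach the executor's private helpers through Python's name mangling: a method named __create_benchmark_dir inside SuperBenchExecutor is stored as _SuperBenchExecutor__create_benchmark_dir, which is also the attribute path mock.patch needs. A tiny illustration with a made-up class:

    class Demo:
        def __hidden(self):
            return 42

    d = Demo()
    # d.__hidden() would raise AttributeError: double-underscore names are mangled.
    print(d._Demo__hidden())  # 42, the same trick the tests above rely on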