Unverified Commit 5e9f948d authored by Yifan Xiong, committed by GitHub

Executor - Save benchmark results to file (#86)

* Save benchmark results to a JSON file.
parent 18398fba
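
With this change the executor collects each benchmark's result dict and persists it under the output directory, one `results.json` per benchmark, rotating any leftover directory from a previous run out of the way first. A minimal sketch of reading a result file back, assuming an output directory named `outputs` and a benchmark named `foo` (both names are illustrative, not taken from this diff):

```python
import json
from pathlib import Path

# Hypothetical paths: the executor uses its configured output directory
# and the benchmark names enabled in the SuperBench config.
results_path = Path('outputs', 'benchmarks', 'foo', 'results.json')
if results_path.is_file():
    with results_path.open() as f:
        results = json.load(f)
    print(results)
```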
@@ -3,6 +3,8 @@
 """SuperBench Executor."""

+import json
+import itertools
 from pathlib import Path

 from omegaconf import ListConfig
@@ -94,6 +96,9 @@ def __exec_benchmark(self, context, log_suffix):
         Args:
             context (BenchmarkContext): Benchmark context to launch.
             log_suffix (str): Log string suffix.
+
+        Return:
+            dict: Benchmark results.
         """
         try:
             benchmark = BenchmarkRegistry.launch_benchmark(context)
@@ -106,10 +111,39 @@ def __exec_benchmark(self, context, log_suffix):
                     logger.info('Executor succeeded in %s.', log_suffix)
                 else:
                     logger.error('Executor failed in %s.', log_suffix)
+                return json.loads(benchmark.serialized_result)
             else:
                 logger.error('Executor failed in %s, invalid context.', log_suffix)
-        except Exception:
+        except Exception as e:
+            logger.error(e)
             logger.error('Executor failed in %s.', log_suffix)
+        return None
+
+    def __create_benchmark_dir(self, benchmark_name):
+        """Create output directory for benchmark.
+
+        Args:
+            benchmark_name (str): Benchmark name.
+        """
+        benchmark_output_dir = Path(self._output_dir, 'benchmarks', benchmark_name)
+        if benchmark_output_dir.is_dir() and any(benchmark_output_dir.iterdir()):
+            logger.warning('Benchmark output directory %s is not empty.', str(benchmark_output_dir))
+            for i in itertools.count(start=1):
+                backup_dir = benchmark_output_dir.with_name('{}.{}'.format(benchmark_name, i))
+                if not backup_dir.is_dir():
+                    benchmark_output_dir.rename(backup_dir)
+                    break
+        benchmark_output_dir.mkdir(mode=0o755, parents=True, exist_ok=True)
+
+    def __write_benchmark_results(self, benchmark_name, benchmark_results):
+        """Write benchmark results to results.json in the benchmark's output directory.
+
+        Args:
+            benchmark_name (str): Benchmark name.
+            benchmark_results (dict): Benchmark results.
+        """
+        with Path(self._output_dir, 'benchmarks', benchmark_name, 'results.json').open(mode='w') as f:
+            json.dump(benchmark_results, f, indent=2)

     def exec(self):
         """Run the SuperBench benchmarks locally."""
@@ -117,6 +151,8 @@ def exec(self):
             if benchmark_name not in self._sb_enabled:
                 continue
             benchmark_config = self._sb_benchmarks[benchmark_name]
+            benchmark_results = {}
+            self.__create_benchmark_dir(benchmark_name)
             for framework in benchmark_config.frameworks or [Framework.NONE]:
                 if benchmark_name.endswith('_models'):
                     for model in benchmark_config.models:
@@ -128,7 +164,11 @@ def exec(self):
                             framework=Framework(framework.lower()),
                             parameters=self.__get_arguments(benchmark_config.parameters)
                         )
-                        self.__exec_benchmark(context, log_suffix)
+                        result = self.__exec_benchmark(context, log_suffix)
+                        if framework != Framework.NONE:
+                            benchmark_results['{}/{}'.format(framework, model)] = result
+                        else:
+                            benchmark_results[model] = result
                 else:
                     log_suffix = 'micro-benchmark {}'.format(benchmark_name)
                     logger.info('Executor is going to execute %s.', log_suffix)
@@ -138,4 +178,9 @@ def exec(self):
                         framework=Framework(framework.lower()),
                         parameters=self.__get_arguments(benchmark_config.parameters)
                     )
-                    self.__exec_benchmark(context, log_suffix)
+                    result = self.__exec_benchmark(context, log_suffix)
+                    if framework != Framework.NONE:
+                        benchmark_results[framework] = result
+                    else:
+                        benchmark_results = result
+            self.__write_benchmark_results(benchmark_name, benchmark_results)
@@ -3,10 +3,12 @@
 """SuperBench Executor test."""

+import json
 import unittest
 import shutil
 import tempfile
 from pathlib import Path
+from unittest import mock

 from omegaconf import OmegaConf
@@ -74,7 +76,59 @@ def test_get_arguments(self):
             ), expected_bert_models_args
         )
+
+    def test_create_benchmark_dir(self):
+        """Test __create_benchmark_dir."""
+        foo_path = Path(self.output_dir, 'benchmarks', 'foo')
+        self.executor._SuperBenchExecutor__create_benchmark_dir('foo')
+        self.assertTrue(foo_path.is_dir())
+        self.assertFalse(any(foo_path.iterdir()))
+
+        (foo_path / 'bar.txt').touch()
+        self.executor._SuperBenchExecutor__create_benchmark_dir('foo')
+        self.assertTrue(foo_path.is_dir())
+        self.assertFalse(any(foo_path.iterdir()))
+        self.assertFalse((foo_path / 'bar.txt').is_file())
+        self.assertTrue(foo_path.with_name('foo.1').is_dir())
+        self.assertTrue((foo_path.with_name('foo.1') / 'bar.txt').is_file())
+
+        (foo_path / 'bar.json').touch()
+        self.executor._SuperBenchExecutor__create_benchmark_dir('foo')
+        self.assertTrue(foo_path.is_dir())
+        self.assertFalse(any(foo_path.iterdir()))
+        self.assertFalse((foo_path / 'bar.json').is_file())
+        self.assertTrue(foo_path.with_name('foo.2').is_dir())
+        self.assertTrue((foo_path.with_name('foo.2') / 'bar.json').is_file())
+
+    def test_write_benchmark_results(self):
+        """Test __write_benchmark_results."""
+        foobar_path = Path(self.output_dir, 'benchmarks', 'foobar')
+        foobar_results_path = foobar_path / 'results.json'
+        self.executor._SuperBenchExecutor__create_benchmark_dir('foobar')
+        foobar_results = {
+            'sum': 1,
+            'avg': 1.1,
+        }
+        self.executor._SuperBenchExecutor__write_benchmark_results('foobar', foobar_results)
+        self.assertTrue(foobar_results_path.is_file())
+        with foobar_results_path.open(mode='r') as f:
+            self.assertDictEqual(json.load(f), foobar_results)
+
     def test_exec_empty_benchmarks(self):
         """Test execute empty benchmarks, nothing should happen."""
         self.executor._sb_enabled = []
         self.executor.exec()
+
+    @mock.patch('superbench.executor.SuperBenchExecutor._SuperBenchExecutor__exec_benchmark')
+    def test_exec_default_benchmarks(self, mock_exec_benchmark):
+        """Test execute default benchmarks, mock exec function.
+
+        Args:
+            mock_exec_benchmark (function): Mocked __exec_benchmark function.
+        """
+        mock_exec_benchmark.return_value = {}
+        self.executor.exec()
+        self.assertTrue(Path(self.output_dir, 'benchmarks').is_dir())
+        for benchmark_name in self.executor._sb_benchmarks:
+            self.assertTrue(Path(self.output_dir, 'benchmarks', benchmark_name).is_dir())
+            self.assertTrue(Path(self.output_dir, 'benchmarks', benchmark_name, 'results.json').is_file())
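
A side note on these tests: they reach the executor's double-underscore methods through Python's name mangling, which rewrites `__name` inside a class body to `_ClassName__name`; the same rule explains the `mock.patch` target `_SuperBenchExecutor__exec_benchmark`. A minimal illustration (the class and method names are placeholders):

```python
class Executor:
    def __helper(self):
        return 42

e = Executor()
# '__helper' is stored on the class as '_Executor__helper'.
assert e._Executor__helper() == 42
```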