Unverified Commit c74f4879 authored by Yifan Xiong, committed by GitHub

Executor: Init - Add superbench executor class (#34)

Add superbench executor class

* add executor class
* update default config to exec benchmarks
* add micro benchmarks and model benchmarks
parent 4561c835
# @package _global_
# Hydra config
hydra:
  run:
    dir: ./outputs/${now:%Y-%m-%d_%H-%M-%S}
  sweep:
    dir: ./outputs/${now:%Y-%m-%d_%H-%M-%S}
  job_logging:
    formatters:
      colorlog:
        format: >-
          [%(cyan)s%(asctime)s %(hostname)s%(reset)s][%(blue)s%(filename)s:%(lineno)s%(reset)s][%(log_color)s%(levelname)s%(reset)s] %(message)s

defaults:
  - hydra/job_logging: colorlog
  - hydra/hydra_logging: colorlog
# SuperBench config
superbench:
  use: []
  enable: null
  benchmarks:
    matmul:
      enable: true
      frameworks:
        - pytorch
      parameters:
        num_steps: 2048
    sharding_matmul:
      enable: true
      frameworks:
        - pytorch
      parameters:
        num_steps: 2048
    bert_models:
      enable: true
      frameworks:
        - pytorch
      models:
        - bert-base
        - bert-large
      parameters:
        duration: 0
        num_warmup: 64
        num_steps: 2048
        batch_size: 32
        precision:
          - float32
          - float16
        model_action:
          - train
          - inference
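
The executor consumes this file as an OmegaConf DictConfig. As a minimal sketch (assuming the file lives at superbench/config/default.yaml, the same path the unit tests below use), a CLI-style override such as Hydra's superbench.enable=matmul can be reproduced with OmegaConf alone:

from omegaconf import OmegaConf

# Load the default config and merge a dotlist override on top of it,
# mimicking what Hydra does with command line overrides.
config = OmegaConf.load('superbench/config/default.yaml')
override = OmegaConf.from_dotlist(['superbench.enable=matmul'])
config = OmegaConf.merge(config, override)

print(config.superbench.enable)    # matmul -- only this benchmark will run
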
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""SuperBench executor module."""
from superbench.executor.executor import SuperBenchExecutor
__all__ = ['SuperBenchExecutor']
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""SuperBench Executor."""
from pathlib import Path
from omegaconf import ListConfig
from superbench.benchmarks import Platform, Framework, BenchmarkRegistry
from superbench.common.utils import SuperBenchLogger, logger

class SuperBenchExecutor():
    """SuperBench executor class."""
    def __init__(self, sb_config, docker_config, output_dir):
        """Initialize.

        Args:
            sb_config (DictConfig): SuperBench config object.
            docker_config (DictConfig): Docker config object.
            output_dir (str): Dir for output.
        """
        self._sb_config = sb_config
        self._docker_config = docker_config
        self._output_dir = output_dir

        self.__set_logger('sb-exec.log')
        logger.info('Executor uses config: %s.', self._sb_config)
        logger.info('Executor writes to: %s.', self._output_dir)

        self.__validate_sb_config()
        self._sb_benchmarks = self._sb_config.superbench.benchmarks
        self._sb_enabled = self.__get_enabled_benchmarks()
        logger.info('Executor will execute: %s', self._sb_enabled)

    def __set_logger(self, filename):
        """Set logger and add file handler.

        Args:
            filename (str): Log file name.
        """
        SuperBenchLogger.add_handler(logger.logger, filename=str(Path(self._output_dir) / filename))

    def __validate_sb_config(self):
        """Validate SuperBench config object.

        Raises:
            InvalidConfigError: If input config is invalid.
        """
        # TODO: add validation

    def __get_enabled_benchmarks(self):
        """Get enabled benchmarks list.

        Returns:
            list: List of benchmarks which will be executed.
        """
        if self._sb_config.superbench.enable:
            if isinstance(self._sb_config.superbench.enable, str):
                return [self._sb_config.superbench.enable]
            elif isinstance(self._sb_config.superbench.enable, (list, ListConfig)):
                return list(self._sb_config.superbench.enable)
        # TODO: order of returned benchmarks may be an issue
        return [k for k, v in self._sb_benchmarks.items() if v.enable]
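
    # Illustrative precedence for the lookup above (comments only; values
    # taken from default.yaml):
    #   enable: 'matmul'                  -> ['matmul']
    #   enable: ['matmul', 'bert_models'] -> ['matmul', 'bert_models']
    #   enable: null                      -> every benchmark flagged enable: true,
    #                                        here ['matmul', 'sharding_matmul', 'bert_models']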

    def __get_platform(self):
        """Detect running platform by environment."""
        # TODO: check devices and env vars
        return Platform.CUDA
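
    # One possible shape for the TODO above (an assumption, not part of this
    # commit; presumes the Platform enum also defines ROCM and CPU members):
    #   import shutil
    #   if shutil.which('nvidia-smi'):
    #       return Platform.CUDA
    #   if shutil.which('rocm-smi'):
    #       return Platform.ROCM
    #   return Platform.CPU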

    def __get_arguments(self, parameters):
        """Get command line arguments for argparse.

        Args:
            parameters (DictConfig): Parameters config dict.

        Returns:
            str: Command line arguments.
        """
        argv = []
        for name, val in parameters.items():
            if val is None:
                continue
            # Check bool before int: bool is a subclass of int in Python,
            # so flags would otherwise be rendered as '--name True'.
            if isinstance(val, bool):
                if val:
                    argv.append('--{}'.format(name))
            elif isinstance(val, (str, int, float)):
                argv.append('--{} {}'.format(name, val))
            elif isinstance(val, (list, ListConfig)):
                argv.append('--{} {}'.format(name, ' '.join(map(str, val))))
        return ' '.join(argv)
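
    # For example, the bert_models parameters in default.yaml serialize to:
    #   --duration 0 --num_warmup 64 --num_steps 2048 --batch_size 32
    #   --precision float32 float16 --model_action train inference
    # (the same strings the unit tests below assert against).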
def __exec_benchmark(self, context, log_suffix):
"""Launch benchmark for context.
Args:
context (BenchmarkContext): Benchmark context to launch.
log_suffix (str): Log string suffix.
"""
benchmark = BenchmarkRegistry.launch_benchmark(context)
if benchmark:
logger.debug(
'benchmark: %s, return code: %s, result: %s.', benchmark.name, benchmark.return_code, benchmark.result
)
if benchmark.return_code == 0:
logger.info('Executor succeeded in %s.', log_suffix)
else:
logger.error('Executor failed in %s.', log_suffix)
else:
logger.error('Executor failed in %s, invalid context.', log_suffix)
def exec(self):
"""Run the SuperBench benchmarks locally."""
for benchmark_name in self._sb_benchmarks:
if benchmark_name not in self._sb_enabled:
continue
benchmark_config = self._sb_benchmarks[benchmark_name]
for framework in benchmark_config.frameworks or [Framework.NONE]:
if benchmark_name.endswith('_models'):
for model in benchmark_config.models:
log_suffix = 'model-benchmark {}: {}/{}'.format(benchmark_name, framework, model)
logger.info('Executor is going to execute %s.', log_suffix)
context = BenchmarkRegistry.create_benchmark_context(
model,
platform=self.__get_platform(),
framework=Framework(framework.lower()).name,
parameters=self.__get_arguments(benchmark_config.parameters)
)
self.__exec_benchmark(context, log_suffix)
else:
log_suffix = 'micro-benchmark {}: {}'.format(benchmark_name, framework)
logger.info('Executor is going to execute %s.', log_suffix)
context = BenchmarkRegistry.create_benchmark_context(
benchmark_name,
platform=self.__get_platform(),
framework=Framework(framework.lower()).name,
parameters=self.__get_arguments(benchmark_config.parameters)
)
self.__exec_benchmark(context, log_suffix)
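
For reference, a minimal sketch of driving the executor directly, outside Hydra; the config path and the temporary output directory mirror the unit tests below, and everything else follows the constructor signature above:

import tempfile

from omegaconf import OmegaConf

from superbench.executor import SuperBenchExecutor

config = OmegaConf.load('superbench/config/default.yaml')
config.superbench.enable = 'matmul'    # run a single benchmark

executor = SuperBenchExecutor(config, None, tempfile.mkdtemp())
executor.exec()
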
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
"""SuperBench Executor test."""
import unittest
import shutil
import tempfile
from pathlib import Path

from omegaconf import OmegaConf

from superbench.executor import SuperBenchExecutor


class ExecutorTestCase(unittest.TestCase):
    """A class for executor test cases."""
    def setUp(self):
        """Hook method for setting up the test fixture before exercising it."""
        default_config_file = Path(__file__).parent / '../../superbench/config/default.yaml'
        self.default_config = OmegaConf.load(str(default_config_file))
        self.output_dir = tempfile.mkdtemp()
        self.executor = SuperBenchExecutor(self.default_config, None, self.output_dir)

    def tearDown(self):
        """Hook method for deconstructing the test fixture after testing it."""
        shutil.rmtree(self.output_dir)

    def test_set_logger(self):
        """Test log file exists."""
        expected_log_file = Path(self.executor._output_dir) / 'sb-exec.log'
        self.assertTrue(expected_log_file.is_file())

    def test_get_enabled_benchmarks_enable_none(self):
        """Test enabled benchmarks when superbench.enable is None."""
        expected_enabled_benchmarks = list(self.default_config.superbench.benchmarks.keys())
        self.assertListEqual(self.executor._sb_enabled, expected_enabled_benchmarks)

    def test_get_enabled_benchmarks_enable_str(self):
        """Test enabled benchmarks when superbench.enable is a string."""
        self.executor._sb_config.superbench.enable = 'benchmark_alpha'
        expected_enabled_benchmarks = ['benchmark_alpha']
        self.assertListEqual(self.executor._SuperBenchExecutor__get_enabled_benchmarks(), expected_enabled_benchmarks)

    def test_get_enabled_benchmarks_enable_list(self):
        """Test enabled benchmarks when superbench.enable is a list."""
        self.executor._sb_config.superbench.enable = ['benchmark_alpha', 'benchmark_beta']
        expected_enabled_benchmarks = ['benchmark_alpha', 'benchmark_beta']
        self.assertListEqual(self.executor._SuperBenchExecutor__get_enabled_benchmarks(), expected_enabled_benchmarks)

    def test_get_platform(self):
        """Test get platform."""
        self.assertEqual(self.executor._SuperBenchExecutor__get_platform().value, 'CUDA')

    def test_get_arguments(self):
        """Test benchmarks arguments."""
        expected_matmul_args = '--num_steps 2048'
        self.assertEqual(
            self.executor._SuperBenchExecutor__get_arguments(
                self.default_config.superbench.benchmarks.matmul.parameters
            ), expected_matmul_args
        )

        expected_sharding_matmul_args = '--num_steps 2048'
        self.assertEqual(
            self.executor._SuperBenchExecutor__get_arguments(
                self.default_config.superbench.benchmarks.sharding_matmul.parameters
            ), expected_sharding_matmul_args
        )

        expected_bert_models_args = \
            '--duration 0 --num_warmup 64 --num_steps 2048 --batch_size 32 ' \
            '--precision float32 float16 --model_action train inference'
        self.assertEqual(
            self.executor._SuperBenchExecutor__get_arguments(
                self.default_config.superbench.benchmarks.bert_models.parameters
            ), expected_bert_models_args
        )

    def test_exec_empty_benchmarks(self):
        """Test execute empty benchmarks, nothing should happen."""
        self.executor._sb_enabled = []
        self.executor.exec()
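
A note on the _SuperBenchExecutor__* calls above: they rely on Python's name mangling rather than any SuperBench API. Inside a class body, an attribute spelled __name is stored as _ClassName__name, which is how the tests reach the private helpers. A self-contained illustration:

class Demo:
    def __hidden(self):
        return 42


d = Demo()
print(d._Demo__hidden())    # prints 42: the mangled name is the real attribute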