# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""SuperBench Executor."""

import os
import json
from pathlib import Path

from omegaconf import ListConfig

from superbench.benchmarks import Platform, Framework, BenchmarkRegistry
from superbench.common.utils import SuperBenchLogger, logger, rotate_dir
from superbench.common.devices import GPU
from superbench.monitor import Monitor


class SuperBenchExecutor():
    """SuperBench executor class."""
    def __init__(self, sb_config, sb_output_dir):
        """Initilize.

        Args:
            sb_config (DictConfig): SuperBench config object.
            sb_output_dir (str): SuperBench output directory.
        """
        self._sb_config = sb_config
        self._sb_output_dir = sb_output_dir
        self._output_path = Path(sb_output_dir).expanduser().resolve()

        self.__set_logger('sb-exec.log')
        logger.debug('Executor uses config: %s.', self._sb_config)
        logger.debug('Executor writes to: %s.', str(self._output_path))

        self.__validate_sb_config()
        self._sb_monitor_config = self._sb_config.superbench.monitor
        self._sb_benchmarks = self._sb_config.superbench.benchmarks
        self._sb_enabled = self.__get_enabled_benchmarks()
        logger.debug('Executor will execute: %s.', self._sb_enabled)

    def __set_logger(self, filename):
        """Set logger and add file handler.

        Args:
            filename (str): Log file name.
        """
        SuperBenchLogger.add_handler(logger.logger, filename=str(self._output_path / filename))

    def __validate_sb_config(self):
        """Validate SuperBench config object.

        Raise:
            InvalidConfigError: If input config is invalid.
        """
        # TODO: add validation

    def __get_enabled_benchmarks(self):
        """Get enabled benchmarks list.

        Return:
            list: List of benchmarks which will be executed.
        """
        if self._sb_config.superbench.enable:
            if isinstance(self._sb_config.superbench.enable, str):
                return [self._sb_config.superbench.enable]
            elif isinstance(self._sb_config.superbench.enable, (list, ListConfig)):
                return list(self._sb_config.superbench.enable)
        # TODO: iteration order of enabled benchmarks may not be deterministic
        return [k for k, v in self._sb_benchmarks.items() if v.enable]

    def __get_platform(self):
        """Detect runninng platform by environment."""
        try:
            gpu = GPU()
            if gpu.vendor == 'nvidia':
                return Platform.CUDA
            elif gpu.vendor == 'amd':
                return Platform.ROCM
        except Exception as e:
            logger.error(e)
        return Platform.CPU

    def __get_arguments(self, parameters):
        """Get command line arguments for argparse.

        Args:
            parameters (DictConfig): Parameters config dict.

        Return:
            str: Command line arguments.
        """
        argv = []
        if not parameters:
            return ''
        for name, val in parameters.items():
            if val is None:
                continue
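            # True booleans become bare flags; scalars become '--name value'; lists become '--name v1 v2 ...'.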
            if isinstance(val, bool):
                if val:
                    argv.append('--{}'.format(name))
            elif isinstance(val, (str, int, float)):
                argv.append('--{} {}'.format(name, val))
            elif isinstance(val, (list, ListConfig)):
                argv.append('--{} {}'.format(name, ' '.join(str(v) for v in val)))
        return ' '.join(argv)

    def __exec_benchmark(self, benchmark_full_name, context):
        """Launch benchmark for context.

        Args:
            benchmark_full_name (str): Benchmark full name.
            context (BenchmarkContext): Benchmark context to launch.

        Return:
            dict: Benchmark result.
        """
        try:
            benchmark = BenchmarkRegistry.launch_benchmark(context)
            if benchmark:
                logger.info(
                    'benchmark: %s, return code: %s, result: %s.', benchmark.name, benchmark.return_code,
                    benchmark.result
                )
                if benchmark.return_code.value == 0:
                    logger.info('Executor succeeded in %s.', benchmark_full_name)
                else:
                    logger.error('Executor failed in %s.', benchmark_full_name)
                result = json.loads(benchmark.serialized_result)
                result['name'] = benchmark_full_name
                return result
            else:
                logger.error('Executor failed in %s, invalid context.', benchmark_full_name)
        except Exception as e:
            logger.error(e)
            logger.error('Executor failed in %s.', benchmark_full_name)
        return None

    def __get_rank_id(self):
        """Get rank ID for current process.

        Return:
            int: Rank ID.
        """
        for rank_env in ['PROC_RANK', 'LOCAL_RANK', 'OMPI_COMM_WORLD_LOCAL_RANK']:
            if os.getenv(rank_env):
                return int(os.getenv(rank_env))

        return 0

    def __get_benchmark_dir(self, benchmark_name):
        """Get output directory for benchmark's current rank.

        Args:
            benchmark_name (str): Benchmark name.

        Return:
            Path: output directory.
        """
        return self._output_path / 'benchmarks' / benchmark_name / ('rank' + str(self.__get_rank_id()))

    def __create_benchmark_dir(self, benchmark_name):
        """Create output directory for benchmark.

        Args:
            benchmark_name (str): Benchmark name.
        """
        rotate_dir(self.__get_benchmark_dir(benchmark_name))
        try:
            self.__get_benchmark_dir(benchmark_name).mkdir(mode=0o755, parents=True, exist_ok=True)
        except Exception:
            logger.exception('Failed to create output directory for benchmark %s.', benchmark_name)
            raise

    def __write_benchmark_results(self, benchmark_name, benchmark_results):
        """Write benchmark results.

        Args:
            benchmark_name (str): Benchmark name.
            benchmark_results (dict): Benchmark results.
        """
        with (self.__get_benchmark_dir(benchmark_name) / 'results.json').open(mode='w') as f:
            json.dump(benchmark_results, f, indent=2)

    def __get_monitor_path(self, benchmark_name):
        """Get the output file path for the monitor.

        Args:
            benchmark_name (str): Benchmark name.

        Return:
            str: monitor output file path.
        """
        return str(self.__get_benchmark_dir(benchmark_name) / 'monitor.jsonl')

    def exec(self):
        """Run the SuperBench benchmarks locally."""
        for benchmark_name in self._sb_benchmarks:
            if benchmark_name not in self._sb_enabled:
                continue
            benchmark_config = self._sb_benchmarks[benchmark_name]
            benchmark_results = list()
            self.__create_benchmark_dir(benchmark_name)

            monitor = None
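            # Start the system monitor on rank 0 only; it is only supported on the CUDA platform.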
            if self.__get_rank_id() == 0 and self._sb_monitor_config and self._sb_monitor_config.enable:
                if self.__get_platform() == Platform.CUDA:
                    monitor = Monitor(
                        None, int(self._sb_monitor_config.sample_duration or 10),
                        int(self._sb_monitor_config.sample_interval or 1), self.__get_monitor_path(benchmark_name)
                    )
                    monitor.start()
                else:
                    logger.warning('Monitor does not support ROCm/CPU platforms.')

            benchmark_real_name = benchmark_name.split(':')[0]
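            # Everything before ':' is the registered benchmark name; the optional suffix is a user annotation.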
            for framework in benchmark_config.frameworks or [Framework.NONE.value]:
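                # Model benchmarks fan out into one run per (framework, model) pair; other benchmarks run once per framework.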
                if benchmark_real_name == 'model-benchmarks' or (
                    ':' not in benchmark_name and benchmark_name.endswith('_models')
                ):
                    for model in benchmark_config.models:
                        full_name = f'{benchmark_name}/{framework}-{model}'
                        logger.info('Executor is going to execute %s.', full_name)
                        context = BenchmarkRegistry.create_benchmark_context(
                            model,
                            platform=self.__get_platform(),
                            framework=Framework(framework.lower()),
                            parameters=self.__get_arguments(benchmark_config.parameters)
                        )
                        result = self.__exec_benchmark(full_name, context)
                        benchmark_results.append(result)
                else:
                    full_name = benchmark_name
                    logger.info('Executor is going to execute %s.', full_name)
                    context = BenchmarkRegistry.create_benchmark_context(
                        benchmark_real_name,
                        platform=self.__get_platform(),
                        framework=Framework(framework.lower()),
                        parameters=self.__get_arguments(benchmark_config.parameters)
                    )
                    result = self.__exec_benchmark(full_name, context)
                    benchmark_results.append(result)

            if monitor:
                monitor.stop()
            self.__write_benchmark_results(benchmark_name, benchmark_results)
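
# Typical usage (a minimal sketch; within SuperBench this class is normally
# driven by the 'sb exec' command rather than constructed directly):
#     executor = SuperBenchExecutor(sb_config, './outputs')
#     executor.exec()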