# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""SuperBench Runner."""

import json
import random
from pathlib import Path
from pprint import pformat
from collections import defaultdict

import jsonlines
from natsort import natsorted
from joblib import Parallel, delayed
from omegaconf import ListConfig, OmegaConf

from superbench.common.utils import SuperBenchLogger, logger
from superbench.runner.ansible import AnsibleClient
from superbench.benchmarks import ReduceType, Reducer
from superbench.monitor import MonitorRecord


class SuperBenchRunner():
    """SuperBench runner class."""
    def __init__(self, sb_config, docker_config, ansible_config, sb_output_dir):
        """Initilize.

        Args:
            sb_config (DictConfig): SuperBench config object.
            docker_config (DictConfig): Docker config object.
            ansible_config (DictConfig): Ansible config object.
            sb_output_dir (str): SuperBench output directory.
        """
        self._sb_config = sb_config
        self._docker_config = docker_config
        self._ansible_config = ansible_config
        self._sb_output_dir = sb_output_dir
        self._output_path = Path(sb_output_dir).expanduser().resolve()
        self._ansible_client = AnsibleClient(ansible_config)

        self.__set_logger('sb-run.log')
        logger.info('Runner uses config: %s.', pformat(OmegaConf.to_container(self._sb_config, resolve=True)))
        logger.info('Runner writes to: %s.', str(self._output_path))

        self._sb_benchmarks = self._sb_config.superbench.benchmarks
        self.__validate_sb_config()
        self._sb_enabled_benchmarks = self.__get_enabled_benchmarks()
        logger.info('Runner will run: %s', self._sb_enabled_benchmarks)

    def __set_logger(self, filename):
        """Set logger and add file handler.

        Args:
            filename (str): Log file name.
        """
        SuperBenchLogger.add_handler(logger.logger, filename=str(self._output_path / filename))

    def __validate_sb_config(self):    # noqa: C901
        """Validate SuperBench config object.

        Raises:
            InvalidConfigError: If input config is invalid.
        """
        # TODO: add validation and defaulting
        if not self._sb_config.superbench.env:
            self._sb_config.superbench.env = {}
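        # Fill in per-mode defaults (proc_num, prefix, MPI MCA parameters, env passthrough) for each benchmark.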
        for name in self._sb_benchmarks:
            if not self._sb_benchmarks[name].modes:
                self._sb_benchmarks[name].modes = []
            for idx, mode in enumerate(self._sb_benchmarks[name].modes):
                if mode.name == 'local':
                    if not mode.proc_num:
                        self._sb_benchmarks[name].modes[idx].proc_num = 1
                    if not mode.prefix:
                        self._sb_benchmarks[name].modes[idx].prefix = ''
                elif mode.name == 'torch.distributed':
                    if not mode.proc_num:
                        self._sb_benchmarks[name].modes[idx].proc_num = 8
                elif mode.name == 'mpi':
                    if not mode.mca:
                        self._sb_benchmarks[name].modes[idx].mca = {
                            'pml': 'ob1',
                            'btl': '^openib',
                            'btl_tcp_if_exclude': 'lo,docker0',
                            'coll_hcoll_enable': 0,
                        }
                    if not mode.env:
                        self._sb_benchmarks[name].modes[idx].env = {}
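                    # Keys defaulted to None are forwarded from the launching environment via plain '-x KEY' (see __get_mode_command).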
                    for key in ['PATH', 'LD_LIBRARY_PATH', 'SB_MICRO_PATH']:
                        self._sb_benchmarks[name].modes[idx].env.setdefault(key, None)

    def __get_enabled_benchmarks(self):
        """Get enabled benchmarks list.

        Returns:
            list: List of benchmarks which will be executed.
        """
        if self._sb_config.superbench.enable:
            if isinstance(self._sb_config.superbench.enable, str):
                return [self._sb_config.superbench.enable]
            elif isinstance(self._sb_config.superbench.enable, (list, ListConfig)):
                return list(self._sb_config.superbench.enable)
        return [k for k, v in self._sb_benchmarks.items() if v.enable]

    def __get_mode_command(self, benchmark_name, mode, timeout=None):
        """Get runner command for given mode.

        Args:
            benchmark_name (str): Benchmark name.
            mode (DictConfig): Runner mode.
            timeout (int): The timeout value in seconds.

        Returns:
            str: Runner command.
        """
        exec_command = ('sb exec --output-dir {output_dir} -c sb.config.yaml -C superbench.enable={name}').format(
            name=benchmark_name,
            output_dir=self._sb_output_dir,
        )
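        # When a timeout is configured, wrap the command with 'timeout' so it is killed after the limit.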
        if timeout is not None:
            exec_command = 'timeout {timeout} {command}'.format(timeout=timeout, command=exec_command)

        mode_command = exec_command
        if mode.name == 'local':
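            # The configured prefix may use {proc_rank} and {proc_num} placeholders; PROC_RANK is also exported for the child process.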
            mode_command = '{prefix} {command}'.format(
                prefix=mode.prefix.format(proc_rank=mode.proc_rank, proc_num=mode.proc_num),
                command=exec_command,
            )
            mode_command = f'PROC_RANK={mode.proc_rank} {mode_command.strip()}'
        elif mode.name == 'torch.distributed':
            # TODO: replace with torch.distributed.run in v1.9
            # TODO: only supports node_num=1 and node_num=all currently
            torch_dist_params = '' if mode.node_num == 1 else \
                '--nnodes=$NNODES --node_rank=$NODE_RANK --master_addr=$MASTER_ADDR --master_port=$MASTER_PORT '
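            # For multi-node runs, $NNODES/$NODE_RANK/$MASTER_ADDR/$MASTER_PORT are expected to come from the runtime environment (e.g. sb.env sourced in _run_proc).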
            mode_command = (
                f'python3 -m torch.distributed.launch'
                f' --use_env --no_python --nproc_per_node={mode.proc_num} {torch_dist_params}{exec_command}'
                f' superbench.benchmarks.{benchmark_name}.parameters.distributed_impl=ddp'
                f' superbench.benchmarks.{benchmark_name}.parameters.distributed_backend=nccl'
            )
        elif mode.name == 'mpi':
            mode_command = (
                'mpirun '    # use default OpenMPI in image
                '-tag-output '    # tag mpi output with [jobid,rank]<stdout/stderr> prefix
                '-allow-run-as-root '    # allow mpirun to run when executed by root user
                '-hostfile hostfile '    # use prepared hostfile
                '-map-by ppr:{proc_num}:node '    # launch {proc_num} processes on each node
                '-bind-to numa '    # bind processes to numa
                '{mca_list} {env_list} {command}'
            ).format(
                proc_num=mode.proc_num,
                mca_list=' '.join(f'-mca {k} {v}' for k, v in mode.mca.items()),
                env_list=' '.join(f'-x {k}={v}' if v else f'-x {k}' for k, v in mode.env.items()),
                command=exec_command,
            )
        else:
            logger.warning('Unknown mode %s.', mode.name)
        return mode_command.strip()

    def deploy(self):    # pragma: no cover
        """Deploy SuperBench environment."""
        logger.info('Preparing SuperBench environment.')
        extravars = {
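            # Random high port (16384-32767) passed to the deploy playbook as the containers' SSH port.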
            'ssh_port': random.randint(1 << 14, (1 << 15) - 1),
            'output_dir': str(self._output_path),
            'docker_image': self._docker_config.image,
        }
        if bool(self._docker_config.username) and bool(self._docker_config.password):
            extravars.update(
                {
                    'docker_registry': self._docker_config.registry,
                    'docker_username': self._docker_config.username,
                    'docker_password': self._docker_config.password,
                }
            )
        self._ansible_client.run(self._ansible_client.get_playbook_config('deploy.yaml', extravars=extravars))

    def check_env(self):    # pragma: no cover
        """Check SuperBench environment."""
        logger.info('Checking SuperBench environment.')
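        # Save the resolved config; 'sb exec' on the nodes consumes it as sb.config.yaml (see __get_mode_command).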
        OmegaConf.save(config=self._sb_config, f=str(self._output_path / 'sb.config.yaml'))
        self._ansible_client.run(
            self._ansible_client.get_playbook_config(
                'check_env.yaml',
                extravars={
                    'output_dir': str(self._output_path),
                    'env': '\n'.join(f'{k}={v}' for k, v in self._sb_config.superbench.env.items()),
                }
            )
        )

    def fetch_results(self):    # pragma: no cover
        """Fetch benchmark results on all nodes."""
        try:
            (self._output_path / 'nodes').mkdir(mode=0o755, parents=True, exist_ok=True)
        except Exception:
            logger.exception('Failed to create directory %s.', str(self._output_path / 'nodes'))
            raise
        self._ansible_client.run(
            self._ansible_client.get_playbook_config(
                'fetch_results.yaml',
                extravars={
                    'sb_output_dir': self._sb_output_dir,
                    'absolute_output_dir': str(self._output_path),
                }
            )
        )

    def __create_results_summary(self):    # pragma: no cover
        """Create the result summary file of all nodes."""
        all_results = list()
        for node_path in (self._output_path / 'nodes').glob('*'):
            if not node_path.is_dir():
                continue
            results_summary = self.__create_single_node_summary(node_path)
            results_summary['node'] = node_path.name
            all_results.append(results_summary)

        with (self._output_path / 'results-summary.jsonl').open(mode='w') as f:
            for result in all_results:
                json.dump(result, f)
                f.write('\n')

    def __create_single_node_summary(self, node_path):    # pragma: no cover # noqa: C901
        """Create the result summary file of single node.

        Args:
            node_path (Path): The Path instance of node directory.

        Returns:
            dict: Result summary of single node.
        """
        results_summary = dict()
        reduce_ops = dict()
        file_list = [Path(f) for f in natsorted([str(f) for f in node_path.glob('**/results.json')])]
        for results_file in file_list:
            with results_file.open() as f:
                try:
                    results = json.load(f)
                except ValueError:
                    logger.error('Invalid JSON file: {}'.format(results_file))
                    continue

                for result in results:
                    try:
                        benchmark_name = result['name']
                    except Exception:
                        logger.error('Invalid content in JSON file: {}'.format(results_file))
                        continue
                    if benchmark_name not in results_summary:
                        results_summary[benchmark_name] = defaultdict(list)
                    for metric in result['result']:
                        metric_name = '{}/{}'.format(benchmark_name, metric)
                        if metric_name not in reduce_ops:
                            reduce_ops[metric_name] = result['reduce_op'][metric]
                        elif reduce_ops[metric_name] != result['reduce_op'][metric]:
                            logger.error('Inconsistent reduce type for metric: {}'.format(metric_name))
                            continue

                        results_summary[benchmark_name][metric].append(result['result'][metric])

        results_summary = self.__merge_benchmark_metrics(results_summary, reduce_ops)
        monitor_summary = self.__merge_monitor_metrics(node_path)
        results_summary = {**results_summary, **monitor_summary}
        with (node_path / 'results-summary.json').open(mode='w') as f:
            json.dump(results_summary, f, indent=2)

        return results_summary

    def __generate_metric_name(self, benchmark_name, metric, rank_count, run_count, curr_rank, curr_run):
        """Generate the summarized metrics name.

        The format of metric name is:
               {benchmark_name}/[{curr_run}/]{metric_name}[:{curr_rank}]
        The run index and rank suffix are only added when run_count > 1 or rank_count > 1, respectively.
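        For example, with run_count == 2 and rank_count == 8, the value of run 1 on rank 3
        for a hypothetical 'event_overhead' metric of the 'kernel-launch' benchmark is
        reported as 'kernel-launch/1/event_overhead:3'.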

        Args:
            benchmark_name (str): The benchmark name.
            metric (str): The metric name.
            rank_count (int): The total number of ranks.
            run_count (int): The total number of runs.
            curr_rank (int): The current rank index.
            curr_run (int): The current run index.

        Returns:
            str: The summarized metric name.
        """
        metric_name = benchmark_name
        if run_count > 1:
            metric_name = '{}/{}'.format(metric_name, curr_run)
        metric_name = '{}/{}'.format(metric_name, metric)
        if rank_count > 1:
            metric_name = '{}:{}'.format(metric_name, curr_rank)

        return metric_name

    def __merge_benchmark_metrics(self, results_summary, reduce_ops):
        """Merge metrics of all benchmarks in one node.

        Args:
            results_summary (dict): Summarized result of one node.
            reduce_ops (dict): The reduce type of each metric.

        Returns:
            dict: Flattened result with metric as key.
        """
        metrics_summary = dict()
        for benchmark_name in results_summary:
            for metric in results_summary[benchmark_name]:
                metric_name = '{}/{}'.format(benchmark_name, metric)
                if metric_name not in reduce_ops or (
                    reduce_ops[metric_name] is not None and reduce_ops[metric_name] not in ReduceType.get_values()
                ):
                    logger.error('Unknown reduce type for metric: {}'.format(metric_name))
                    continue

                if reduce_ops[metric_name] is not None:
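                    # zip(*...) groups the i-th run from every rank, so reduce_func aggregates across ranks for each run.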
                    reduce_func = Reducer.get_reduce_func(ReduceType(reduce_ops[metric_name]))
                    values = [reduce_func(list(result)) for result in zip(*results_summary[benchmark_name][metric])]
                    for run in range(len(values)):
                        metric_name = self.__generate_metric_name(benchmark_name, metric, 1, len(values), 0, run)
                        metrics_summary[metric_name] = values[run]
                else:
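                    # No reduce op registered: keep every per-rank, per-run value under an expanded metric name.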
                    rank_count = len(results_summary[benchmark_name][metric])
                    for rank, rank_value in enumerate(results_summary[benchmark_name][metric]):
                        run_count = len(rank_value)
                        for run, run_value in enumerate(rank_value):
                            metric_name = self.__generate_metric_name(
                                benchmark_name, metric, rank_count, run_count, rank, run
                            )
                            metrics_summary[metric_name] = run_value

        return metrics_summary

    def __merge_monitor_metrics(self, node_path):
        """Merge and summarize monitor metrics of one node.

        Args:
            node_path (Path): The Path instance of node directory.

        Returns:
            dict: Flattened result with metric as key.
        """
        metrics_summary = dict()
        all_samples = list()
        file_list = list(node_path.glob('**/monitor.jsonl'))
        for results_file in file_list:
            try:
                with jsonlines.open(results_file) as reader:
                    all_samples.extend(reader)
            except Exception as e:
                logger.error('Invalid JSONL file: {}, error message: {}'.format(results_file, str(e)))
                continue
        all_samples = sorted(all_samples, key=lambda k: k.get('time', '0'))
        metrics_dict = dict()
        for sample in all_samples:
            for metric, value in sample.items():
                if metric not in metrics_dict:
                    metrics_dict[metric] = list()
                metrics_dict[metric].append(value)

        for metric, values in metrics_dict.items():
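            # Apply the reducer registered in MonitorRecord.reduce_ops for the pattern that matches this metric name.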
            for pattern, reduce_type in MonitorRecord.reduce_ops.items():
                if pattern in metric:
                    reduce_func = Reducer.get_reduce_func(reduce_type)
                    metric_name = 'monitor/{}'.format(metric)
                    metrics_summary[metric_name] = reduce_func(values)
                    continue

        return metrics_summary

    def _run_proc(self, benchmark_name, mode, vars):
        """Run the process.

        Args:
            benchmark_name (str): Benchmark name.
            mode (DictConfig): Runner mode.
            vars (dict): Process variables.

        Returns:
            int: Process return code.
        """
        mode.update(vars)
        logger.info('Runner is going to run %s in %s mode, proc rank %d.', benchmark_name, mode.name, mode.proc_rank)

        timeout = self._sb_benchmarks[benchmark_name].timeout
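        # Execute inside the sb-workspace container; sb.env is sourced with allexport so its variables reach the benchmark process.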
        ansible_runner_config = self._ansible_client.get_shell_config(
            (
                'docker exec sb-workspace bash -c '
                "'set -o allexport && source sb.env && set +o allexport && {command}'"
            ).format(command=self.__get_mode_command(benchmark_name, mode, timeout))
        )
        if mode.name == 'mpi':
            ansible_runner_config = self._ansible_client.update_mpi_config(ansible_runner_config)

        ansible_runner_config['timeout'] = timeout

        rc = self._ansible_client.run(ansible_runner_config, sudo=True)
        return rc

    def run(self):
        """Run the SuperBench benchmarks distributedly."""
        self.check_env()
        for benchmark_name in self._sb_benchmarks:
            if benchmark_name not in self._sb_enabled_benchmarks:
                continue
            benchmark_config = self._sb_benchmarks[benchmark_name]
            for mode in benchmark_config.modes:
                if mode.name == 'local':
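                    # Launch one process per local rank; joblib runs them concurrently when mode.parallel is set, otherwise one at a time.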
                    Parallel(n_jobs=mode.proc_num if mode.parallel else 1)(
                        delayed(self._run_proc)(benchmark_name, mode, {
                            'proc_rank': proc_rank
                        }) for proc_rank in range(mode.proc_num)
                    )
                elif mode.name == 'torch.distributed' or mode.name == 'mpi':
                    self._run_proc(benchmark_name, mode, {'proc_rank': 0})
                else:
                    logger.warning('Unknown mode %s.', mode.name)
            self.fetch_results()

        self.__create_results_summary()