test_runner.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""SuperBench Runner test."""

import unittest
import shutil
import tempfile
from pathlib import Path
from unittest import mock

import yaml
from omegaconf import OmegaConf

from superbench.runner import SuperBenchRunner


class RunnerTestCase(unittest.TestCase):
    """A class for runner test cases."""
    def setUp(self):
        """Hook method for setting up the test fixture before exercising it."""
        default_config_file = Path(__file__).parent / '../../superbench/config/default.yaml'
        with default_config_file.open() as fp:
            self.default_config = OmegaConf.create(yaml.load(fp, Loader=yaml.SafeLoader))
        self.output_dir = tempfile.mkdtemp()

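        # The two None arguments presumably stand in for the Docker and Ansible
        # configurations, which these tests never exercise.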
        self.runner = SuperBenchRunner(self.default_config, None, None, self.output_dir)

    def tearDown(self):
        """Hook method for deconstructing the test fixture after testing it."""
        shutil.rmtree(self.output_dir)

    def test_set_logger(self):
        """Test log file exists."""
        expected_log_file = Path(self.runner._output_dir) / 'sb-run.log'
        self.assertTrue(expected_log_file.is_file())


    def test_get_mode_command(self):
        """Test __get_mode_command."""
        test_cases = [
            {
                'benchmark_name': 'foo',
                'mode': {
                    'name': 'non_exist',
                },
                'expected_command': 'sb exec -c sb.config.yaml -C superbench.enable=foo',
            },
            {
                'benchmark_name': 'foo',
                'mode': {
                    'name': 'local',
                    'proc_num': 1,
                    'prefix': '',
                },
                'expected_command': 'sb exec -c sb.config.yaml -C superbench.enable=foo',
            },
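            # {proc_rank} and {proc_num} placeholders in the prefix are expected to be
            # substituted before the prefix is prepended to the exec command.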
            {
                'benchmark_name': 'foo',
                'mode': {
                    'name': 'local',
                    'proc_num': 8,
                    'proc_rank': 6,
                    'prefix': 'CUDA_VISIBLE_DEVICES={proc_rank} numactl -c $(({proc_rank}/2))'
                },
                'expected_command':
                ('CUDA_VISIBLE_DEVICES=6 numactl -c $((6/2)) '
                 'sb exec -c sb.config.yaml -C superbench.enable=foo'),
            },
            {
                'benchmark_name': 'foo',
                'mode': {
                    'name': 'local',
                    'proc_num': 16,
                    'proc_rank': 1,
                    'prefix': 'RANK={proc_rank} NUM={proc_num}'
                },
                'expected_command': 'RANK=1 NUM=16 sb exec -c sb.config.yaml -C superbench.enable=foo',
            },
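            # torch.distributed modes should wrap the command in torch.distributed.launch,
            # with rendezvous settings taken from environment variables.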
            {
                'benchmark_name': 'foo',
                'mode': {
                    'name': 'torch.distributed',
                    'proc_num': 1,
                    'node_num': 'all',
                },
                'expected_command': (
                    'python3 -m torch.distributed.launch '
                    '--use_env --no_python --nproc_per_node=1 '
                    '--nnodes=$NNODES --node_rank=$NODE_RANK '
                    '--master_addr=$MASTER_ADDR --master_port=$MASTER_PORT '
                    'sb exec -c sb.config.yaml -C superbench.enable=foo '
                    'superbench.benchmarks.foo.parameters.distributed_impl=ddp '
                    'superbench.benchmarks.foo.parameters.distributed_backend=nccl'
                ),
            },
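            # node_num=1 pins --nnodes to a literal value instead of the $NNODES environment variable.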
            {
                'benchmark_name': 'foo',
                'mode': {
                    'name': 'torch.distributed',
                    'proc_num': 8,
                    'node_num': 1,
                },
                'expected_command': (
                    'python3 -m torch.distributed.launch '
                    '--use_env --no_python --nproc_per_node=8 '
                    '--nnodes=1 --node_rank=$NODE_RANK '
                    '--master_addr=$MASTER_ADDR --master_port=$MASTER_PORT '
                    'sb exec -c sb.config.yaml -C superbench.enable=foo '
                    'superbench.benchmarks.foo.parameters.distributed_impl=ddp '
                    'superbench.benchmarks.foo.parameters.distributed_backend=nccl'
                ),
            },
        ]
        for test_case in test_cases:
            with self.subTest(msg='Testing with case', test_case=test_case):
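                # __get_mode_command is a double-underscore method, so it must be
                # reached through its name-mangled form.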
                self.assertEqual(
                    self.runner._SuperBenchRunner__get_mode_command(
                        test_case['benchmark_name'], OmegaConf.create(test_case['mode'])
                    ), test_case['expected_command']
                )

    def test_run_empty_benchmarks(self):
        """Test run empty benchmarks, nothing should happen."""
        self.runner._sb_enabled_benchmarks = []
        self.runner.run()

    @mock.patch('superbench.runner.ansible.AnsibleClient.run')
    def test_run_default_benchmarks(self, mock_ansible_client_run):
        """Test run default benchmarks, mock AnsibleClient.run function.

        Args:
            mock_ansible_client_run (function): Mocked AnsibleClient.run function.
        """
        mock_ansible_client_run.return_value = 0
        self.runner.run()