# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""SuperBench Executor test."""

import json
import shutil
import tempfile
import unittest
from pathlib import Path
from unittest import mock

import yaml
from omegaconf import OmegaConf

from superbench.benchmarks import ReturnCode
from superbench.executor import SuperBenchExecutor


class ExecutorTestCase(unittest.TestCase):
    """A class for executor test cases.

    Args:
        unittest.TestCase (unittest.TestCase): TestCase class.
    """
    def setUp(self):
        """Hook method for setting up the test fixture before exercising it."""
        default_config_file = Path(__file__).parent / '../../superbench/config/default.yaml'
        with default_config_file.open() as fp:
            self.default_config = OmegaConf.create(yaml.load(fp, Loader=yaml.SafeLoader))
        self.sb_output_dir = tempfile.mkdtemp()

        self.executor = SuperBenchExecutor(self.default_config, self.sb_output_dir)

    def tearDown(self):
        """Hook method for deconstructing the test fixture after testing it."""
        shutil.rmtree(self.sb_output_dir)

    def test_set_logger(self):
        """Test log file exists."""
        expected_log_file = Path(self.executor._sb_output_dir) / 'sb-exec.log'
        self.assertTrue(expected_log_file.is_file())

    def test_get_enabled_benchmarks_enable_none(self):
        """Test enabled benchmarks when superbench.enable is none."""
        benchmarks = self.default_config.superbench.benchmarks
        expected_enabled_benchmarks = [x for x in benchmarks if benchmarks[x]['enable']]
        self.assertListEqual(self.executor._sb_enabled, expected_enabled_benchmarks)

    def test_get_enabled_benchmarks_enable_str(self):
        """Test enabled benchmarks when superbench.enable is string."""
        self.executor._sb_config.superbench.enable = 'benchmark_alpha'
        expected_enabled_benchmarks = ['benchmark_alpha']
        self.assertListEqual(self.executor._SuperBenchExecutor__get_enabled_benchmarks(), expected_enabled_benchmarks)

    def test_get_enabled_benchmarks_enable_list(self):
        """Test enabled benchmarks when superbench.enable is list."""
        self.executor._sb_config.superbench.enable = ['benchmark_alpha', 'benchmark_beta']
        expected_enabled_benchmarks = ['benchmark_alpha', 'benchmark_beta']
        self.assertListEqual(self.executor._SuperBenchExecutor__get_enabled_benchmarks(), expected_enabled_benchmarks)

    @mock.patch('pathlib.Path.is_char_device')
    def test_get_platform(self, mock_is_char_device):
        """Test get platform."""
        mock_is_char_device.return_value = True
        self.assertEqual(self.executor._SuperBenchExecutor__get_platform().value, 'CUDA')

    def test_get_arguments(self):
        """Test benchmarks arguments."""
        test_cases = [
            {
                'parameters': None,
                'expected_args': '',
            },
            {
                'parameters': {
                    'duration': 0,
                    'num_warmup': 16,
                    'num_steps': 128,
                    'batch_size': 16,
                    'precision': ['float32', 'float16'],
                    'model_action': ['train', 'inference'],
                },
                'expected_args': (
                    '--duration 0 --num_warmup 16 --num_steps 128 --batch_size 16 '
                    '--precision float32 float16 --model_action train inference'
                ),
            },
        ]
        for test_case in test_cases:
            with self.subTest(msg='Testing with case', test_case=test_case):
                self.assertEqual(
                    self.executor._SuperBenchExecutor__get_arguments(test_case['parameters']),
                    test_case['expected_args']
                )

    def test_create_benchmark_dir(self):
        """Test __create_benchmark_dir."""
        foo_path = Path(self.sb_output_dir, 'benchmarks', 'foo', 'rank0')
        self.executor._SuperBenchExecutor__create_benchmark_dir('foo')
        self.assertTrue(foo_path.is_dir())
        self.assertFalse(any(foo_path.iterdir()))

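        # Re-creating over a non-empty dir should move the old contents to
        # rank0.bak1.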
        (foo_path / 'bar.txt').touch()
        self.executor._SuperBenchExecutor__create_benchmark_dir('foo')
        self.assertTrue(foo_path.is_dir())
        self.assertFalse(any(foo_path.iterdir()))
        self.assertFalse((foo_path / 'bar.txt').is_file())
        self.assertTrue(foo_path.with_name('rank0.bak1').is_dir())
        self.assertTrue((foo_path.with_name('rank0.bak1') / 'bar.txt').is_file())

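        # A second re-creation should rotate the next backup to rank0.bak2.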
        (foo_path / 'bar.json').touch()
        self.executor._SuperBenchExecutor__create_benchmark_dir('foo')
        self.assertTrue(foo_path.is_dir())
        self.assertFalse(any(foo_path.iterdir()))
        self.assertFalse((foo_path / 'bar.json').is_file())
        self.assertTrue(foo_path.with_name('rank0.bak2').is_dir())
        self.assertTrue((foo_path.with_name('rank0.bak2') / 'bar.json').is_file())

    def test_write_benchmark_results(self):
        """Test __write_benchmark_results."""
        foobar_path = Path(self.sb_output_dir, 'benchmarks', 'foobar', 'rank0')
        foobar_results_path = foobar_path / 'results.json'
        self.executor._SuperBenchExecutor__create_benchmark_dir('foobar')
        foobar_results = {
            'sum': 1,
            'avg': 1.1,
        }
        self.executor._SuperBenchExecutor__write_benchmark_results('foobar', foobar_results)
        self.assertTrue(foobar_results_path.is_file())
        with foobar_results_path.open(mode='r') as f:
            self.assertDictEqual(json.load(f), foobar_results)

    def test_exec_empty_benchmarks(self):
        """Test execute empty benchmarks, nothing should happen."""
        self.executor._sb_enabled = []
        self.executor.exec()

    @mock.patch('superbench.benchmarks.BenchmarkRegistry.launch_benchmark')
    def test_exec_default_benchmarks(self, mock_launch_benchmark):
        """Test execute default benchmarks, mock exec function.

        Args:
            mock_launch_benchmark (function): Mocked BenchmarkRegistry.launch_benchmark function in __exec_benchmark.
        """
        mock_launch_benchmark.return_value = OmegaConf.create(
            {
                'name': 'foobar',
                'return_code': ReturnCode.SUCCESS,
                'result': {
                    'return_code': [0],
                    'metric1': [-1.0],
                    'metric2': [1.0]
                },
                'serialized_result': json.dumps({
                    'name': 'foobar',
                    'return_code': 0,
                }),
            }
        )
        self.executor.exec()

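        # Every enabled benchmark should end up with its own rank0 directory
        # containing a results.json whose entries reference the benchmark name.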
        self.assertTrue(Path(self.sb_output_dir, 'benchmarks').is_dir())
        for benchmark_name in self.executor._sb_enabled:
            p = Path(self.sb_output_dir, 'benchmarks', benchmark_name, 'rank0')
            self.assertTrue(p.is_dir())
            self.assertTrue((p / 'results.json').is_file())
            with (p / 'results.json').open() as f:
                for result in json.load(f):
                    self.assertIn(benchmark_name, result['name'])
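

# Standard unittest entry point so the file can also be run directly
# (assumption: the suite otherwise runs under pytest/unittest discovery).
if __name__ == '__main__':
    unittest.main()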