# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

"""Tests for disk-performance benchmark."""

import os
import unittest
from unittest import mock

from tests.helper import decorator
from tests.helper.testcase import BenchmarkTestCase

from superbench.benchmarks import BenchmarkRegistry, BenchmarkType, ReturnCode, Platform


class DiskBenchmarkTest(BenchmarkTestCase, unittest.TestCase):
    """Test class for disk-performance benchmark."""
    @classmethod
    def setUpClass(cls):
        """Hook method for setting up class fixture before running tests in the class."""
        super().setUpClass()
        cls.createMockEnvs(cls)
        # Mock the fio binary so _preprocess() can locate it without a real installation.
        cls.createMockFiles(cls, ['bin/fio'])

    def test_disk_performance_empty_param(self):
        """Test disk-performance benchmark command generation with empty parameter."""
        benchmark_name = 'disk-benchmark'
        (benchmark_class,
         predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
        assert (benchmark_class)

        benchmark = benchmark_class(benchmark_name, parameters='')

        # Check basic information
        assert (benchmark)
        ret = benchmark._preprocess()
        assert (ret is True)
        assert (benchmark.return_code == ReturnCode.SUCCESS)
        assert (benchmark.name == 'disk-benchmark')
        assert (benchmark.type == BenchmarkType.MICRO)

        # Without --block_devices there is nothing to benchmark, so no fio commands.
        assert (0 == len(benchmark._commands))

    @mock.patch('pathlib.Path.is_block_device')
    def test_disk_performance_invalid_block_device(self, mock_is_block_device):
        """Test disk-performance benchmark command generation with invalid block device."""
        # Force every device path to be reported as not a block device.
        mock_is_block_device.return_value = False

        benchmark_name = 'disk-benchmark'
        (benchmark_class,
         predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
        assert (benchmark_class)

        block_devices = ['mock_block_device_0']
        block_device_option = '--block_devices ' + ' '.join(block_devices)

        benchmark = benchmark_class(benchmark_name, parameters=block_device_option)

        # Preprocess must reject the non-block-device path with INVALID_ARGUMENT.
        assert (benchmark)
        ret = benchmark._preprocess()
        assert (ret is False)
        assert (benchmark.return_code == ReturnCode.INVALID_ARGUMENT)
        assert (benchmark.name == 'disk-benchmark')
        assert (benchmark.type == BenchmarkType.MICRO)

    @mock.patch('pathlib.Path.is_block_device')
    def test_disk_performance_benchmark_disabled(self, mock_is_block_device):
        """Test disk-performance benchmark command generation with all benchmarks disabled."""
        mock_is_block_device.return_value = True

        benchmark_name = 'disk-benchmark'
        (benchmark_class,
         predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
        assert (benchmark_class)

        block_devices = ['/dev/nvme0n1', '/dev/nvme1n1']
        block_device_option = '--block_devices ' + ' '.join(block_devices)

        # Zero runtimes disable every sub-benchmark, so no commands are generated.
        param_str = block_device_option
        param_str += ' --rand_precond_time=0'
        param_str += ' --seq_read_runtime=0'
        param_str += ' --rand_read_runtime=0'
        benchmark = benchmark_class(benchmark_name, parameters=param_str)

        # Check basic information
        assert (benchmark)
        ret = benchmark._preprocess()
        assert (ret is True)
        assert (benchmark.return_code == ReturnCode.SUCCESS)
        assert (benchmark.name == 'disk-benchmark')
        assert (benchmark.type == BenchmarkType.MICRO)

        # Command list should be empty
        assert (0 == len(benchmark._commands))

    @mock.patch('pathlib.Path.is_block_device')
    def test_disk_performance_benchmark_enabled(self, mock_is_block_device):
        """Test disk-performance benchmark command generation with all benchmarks enabled."""
        mock_is_block_device.return_value = True

        benchmark_name = 'disk-benchmark'
        (benchmark_class,
         predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
        assert (benchmark_class)

        block_devices = ['mock_block_device_0', 'mock_block_device_1']
        block_device_option = '--block_devices ' + ' '.join(block_devices)

        # Each option gets a unique "magic" value so the assertions below can
        # verify that every value lands in the right generated fio command.
        init_test_magic = 45
        curr_test_magic = init_test_magic
        param_str = block_device_option
        # Sequential precondition
        param_str += ' --enable_seq_precond'
        # Random precondition
        param_str += ' --rand_precond_time=%d' % curr_test_magic
        curr_test_magic += 1
        # Seq/rand read/write
        for io_pattern in ['seq', 'rand']:
            for io_type in ['read', 'write', 'readwrite']:
                io_str = '%s_%s' % (io_pattern, io_type)
                param_str += ' --%s_ramp_time=%d' % (io_str, curr_test_magic)
                curr_test_magic += 1
                param_str += ' --%s_runtime=%d' % (io_str, curr_test_magic)
                curr_test_magic += 1
                param_str += ' --%s_iodepth=%d' % (io_str, curr_test_magic)
                curr_test_magic += 1
                param_str += ' --%s_numjobs=%d' % (io_str, curr_test_magic)
                curr_test_magic += 1
        # Verify
        param_str += ' --verify=md5'
        benchmark = benchmark_class(benchmark_name, parameters=param_str)

        # Check basic information
        assert (benchmark)
        ret = benchmark._preprocess()
        assert (ret is True)
        assert (benchmark.return_code == ReturnCode.SUCCESS)
        assert (benchmark.name == 'disk-benchmark')
        assert (benchmark.type == BenchmarkType.MICRO)

        # Check command list
        # 2 devices * (2 preconditions + 2 io_patterns * 3 io_types) = 16 commands
        assert (16 == len(benchmark._commands))

        # Check parameter assignments; commands are grouped per device, and the
        # magic-value sequence restarts for each device.
        command_idx = 0
        default_rwmixread = 80
        for block_device in block_devices:
            curr_test_magic = init_test_magic

            # Sequential precondition
            assert ('--filename=%s' % block_device in benchmark._commands[command_idx])
            assert ('--verify=md5' in benchmark._commands[command_idx])
            command_idx += 1
            # Random precondition
            assert ('--filename=%s' % block_device in benchmark._commands[command_idx])
            assert ('--runtime=%d' % curr_test_magic in benchmark._commands[command_idx])
            assert ('--verify=md5' in benchmark._commands[command_idx])
            curr_test_magic += 1
            command_idx += 1
            # Seq/rand read/write ('rw' is fio's name for the 'readwrite' mode above)
            for io_pattern in ['seq', 'rand']:
                for io_type in ['read', 'write', 'rw']:
                    assert ('--filename=%s' % block_device in benchmark._commands[command_idx])
                    # fio spells random modes with a 'rand' prefix (e.g. randread).
                    fio_rw = '%s%s' % (io_pattern if io_pattern == 'rand' else '', io_type)
                    assert ('--rw=%s' % fio_rw in benchmark._commands[command_idx])
                    assert ('--ramp_time=%d' % curr_test_magic in benchmark._commands[command_idx])
                    curr_test_magic += 1
                    assert ('--runtime=%d' % curr_test_magic in benchmark._commands[command_idx])
                    curr_test_magic += 1
                    assert ('--iodepth=%d' % curr_test_magic in benchmark._commands[command_idx])
                    curr_test_magic += 1
                    assert ('--numjobs=%d' % curr_test_magic in benchmark._commands[command_idx])
                    curr_test_magic += 1
                    if io_type == 'rw':
                        assert ('--rwmixread=%d' % default_rwmixread in benchmark._commands[command_idx])
                    assert ('--verify=md5' in benchmark._commands[command_idx])
                    command_idx += 1

    @mock.patch('pathlib.Path.is_block_device')
    def test_disk_performance_env_parsing(self, mock_is_block_device):
        """Test disk-performance benchmark env parsing."""
        mock_is_block_device.return_value = True

        benchmark_name = 'disk-benchmark'
        (benchmark_class,
         predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
        assert (benchmark_class)

        # Test valid envs: PROC_RANK selects one device and its NUMA node.
        proc_ranks = ['0', '1', '2', '3']
        block_devices = ['/dev/nvme0n1', '/dev/nvme2n1', '/dev/nvme1n1', '/dev/nvme3n1']
        numa_nodes = ['0', '0', '1', '1']
        os.environ['NUMA_NODES'] = ','.join(numa_nodes)
        param_str = '--block_devices ' + ' '.join(block_devices)

        for proc_rank in proc_ranks:
            os.environ['PROC_RANK'] = proc_rank
            benchmark = benchmark_class(benchmark_name, parameters=param_str)

            # Check basic information
            assert (benchmark)
            ret = benchmark._preprocess()
            assert (ret is True)
            assert (benchmark.return_code == ReturnCode.SUCCESS)
            assert (benchmark.name == 'disk-benchmark')
            assert (benchmark.type == BenchmarkType.MICRO)

            # Check command list
            # seq/rand read = 2 commands
            assert (2 == len(benchmark._commands))

            command_idx = 0
            commands_per_device = 2
            block_device = block_devices[int(proc_rank)]
            # The command must be bound to the NUMA node of the rank's device.
            assert (benchmark._args.numa == int(numa_nodes[int(proc_rank)]))
            assert (benchmark._commands[command_idx].startswith(f'numactl -N {benchmark._args.numa}'))
            for _ in range(commands_per_device):
                assert (f'--filename={block_device}' in benchmark._commands[command_idx])
                command_idx += 1

        # Test invalid envs: rank 4 has no corresponding device/NUMA entry.
        os.environ['PROC_RANK'] = '4'
        benchmark = benchmark_class(benchmark_name, parameters=param_str)
        assert (benchmark)
        ret = benchmark._preprocess()
        assert (ret is False)
        assert (benchmark.return_code == ReturnCode.INVALID_ARGUMENT)
        assert (benchmark.name == 'disk-benchmark')
        assert (benchmark.type == BenchmarkType.MICRO)

        # Clean up so later tests are not affected by these env vars.
        del os.environ['NUMA_NODES']
        del os.environ['PROC_RANK']

    @decorator.load_data('tests/data/disk_performance.log')
    def test_disk_performance_result_parsing(self, test_raw_output):
        """Test disk-performance benchmark result parsing."""
        benchmark_name = 'disk-benchmark'
        (benchmark_class,
         predefine_params) = BenchmarkRegistry._BenchmarkRegistry__select_benchmark(benchmark_name, Platform.CPU)
        assert (benchmark_class)
        benchmark = benchmark_class(benchmark_name, parameters='')
        assert (benchmark)
        ret = benchmark._preprocess()
        assert (ret is True)
        assert (benchmark.return_code == ReturnCode.SUCCESS)
        assert (benchmark.name == 'disk-benchmark')
        assert (benchmark.type == BenchmarkType.MICRO)

        # Positive case - valid raw output.
        jobname_prefix = 'nvme0n1_rand_read_write'
        assert (benchmark._process_raw_result(0, test_raw_output))
        assert (benchmark.return_code == ReturnCode.SUCCESS)

        # bs + <read, write> x <iops, 95th, 99th, 99.9th> = 9 metrics
        assert (9 + benchmark.default_metric_count == len(benchmark.result.keys()))

        assert (1 == len(benchmark.result[jobname_prefix + '_bs']))
        assert (4096 == benchmark.result[jobname_prefix + '_bs'][0])

        assert (1 == len(benchmark.result[jobname_prefix + '_read_iops']))
        assert (85138.890741 == benchmark.result[jobname_prefix + '_read_iops'][0])
        assert (1 == len(benchmark.result[jobname_prefix + '_write_iops']))
        assert (85066.128925 == benchmark.result[jobname_prefix + '_write_iops'][0])

        assert (1 == len(benchmark.result[jobname_prefix + '_read_lat_ns_95.0']))
        assert (1941504 == benchmark.result[jobname_prefix + '_read_lat_ns_95.0'][0])
        assert (1 == len(benchmark.result[jobname_prefix + '_read_lat_ns_99.0']))
        assert (2244608 == benchmark.result[jobname_prefix + '_read_lat_ns_99.0'][0])
        assert (1 == len(benchmark.result[jobname_prefix + '_read_lat_ns_99.9']))
        assert (3620864 == benchmark.result[jobname_prefix + '_read_lat_ns_99.9'][0])

        assert (1 == len(benchmark.result[jobname_prefix + '_write_lat_ns_95.0']))
        assert (1908736 == benchmark.result[jobname_prefix + '_write_lat_ns_95.0'][0])
        assert (1 == len(benchmark.result[jobname_prefix + '_write_lat_ns_99.0']))
        assert (2072576 == benchmark.result[jobname_prefix + '_write_lat_ns_99.0'][0])
        assert (1 == len(benchmark.result[jobname_prefix + '_write_lat_ns_99.9']))
        assert (2605056 == benchmark.result[jobname_prefix + '_write_lat_ns_99.9'][0])

        # Negative case - invalid raw output.
        assert (benchmark._process_raw_result(1, 'Invalid raw output') is False)
        assert (benchmark.return_code == ReturnCode.MICROBENCHMARK_RESULT_PARSING_FAILURE)