# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Module of the IB performance benchmarks."""

import os

from superbench.common.utils import logger
from superbench.benchmarks import BenchmarkRegistry, ReturnCode
from superbench.common.devices import GPU
from superbench.benchmarks.micro_benchmarks import MicroBenchmarkWithInvoke


class IBBenchmark(MicroBenchmarkWithInvoke):
    """The IB validation performance benchmark class."""
    def __init__(self, name, parameters=''):
        """Constructor.

        Args:
            name (str): benchmark name.
            parameters (str): benchmark parameters.
        """
        super().__init__(name, parameters)

        self._bin_name = 'ib_validation'
        self.__support_ib_commands = [
            'ib_write_bw', 'ib_read_bw', 'ib_send_bw', 'ib_write_lat', 'ib_read_lat', 'ib_send_lat'
        ]
        self.__patterns = ['one-to-one', 'one-to-many', 'many-to-one']
        self.__config_path = os.path.join(os.getcwd(), 'config.txt')
        self.__config = []

    def add_parser_arguments(self):
        """Add the specified arguments."""
        super().add_parser_arguments()

        self._parser.add_argument(
            '--ib_dev',
            type=str,
            default='mlx5_0',
            required=False,
            help='The IB device, e.g., mlx5_0, mlx5_$LOCAL_RANK, mlx5_$((LOCAL_RANK/2)), etc.',
        )
        self._parser.add_argument(
            '--gpu_dev',
            type=str,
            default=None,
            required=False,
            help='The GPU device, e.g., 0, $LOCAL_RANK, $((LOCAL_RANK/2)), etc.',
        )
        self._parser.add_argument(
            '--numa_dev',
            type=str,
            default=None,
            required=False,
            help='The NUMA node to bind, e.g., 0, $LOCAL_RANK, $((LOCAL_RANK/2)), etc.',
        )
        self._parser.add_argument(
            '--timeout',
            type=int,
            default=120,
            required=False,
            help='Timeout in seconds for each perftest command, in case the IB link is slow.',
        )
        # perftest configurations
        self._parser.add_argument(
            '--iters',
            type=int,
            default=5000,
            required=False,
            help='The number of iterations for the perftest command.',
        )
        self._parser.add_argument(
            '--msg_size',
            type=int,
            default=8388608,
            required=False,
            help='The message size in bytes for the perftest command, e.g., 8388608. '
            'A non-positive value runs all message sizes (-a).',
        )
        self._parser.add_argument(
            '--bidirectional', action='store_true', default=False, help='Measure bidirectional bandwidth.'
        )
        self._parser.add_argument(
            '--command',
            type=str,
            default='ib_write_bw',
            required=False,
            help='The perftest command to use, e.g., {}.'.format(' '.join(self.__support_ib_commands)),
        )
        # customized configurations
        self._parser.add_argument(
            '--pattern',
            type=str,
            default='one-to-one',
            required=False,
            help='IB traffic pattern type, e.g., {}.'.format(' '.join(self.__patterns)),
        )
        self._parser.add_argument(
            '--config',
            type=str,
            default=None,
            required=False,
            help='The path of the config file on the target machines.',
        )
        self._parser.add_argument(
            '--hostfile',
            type=str,
            default=None,
            required=False,
            help='The path of the hostfile on the target machines.',
        )

    def __one_to_many(self, n):
        """Generate one-to-many pattern config.

        There are n rounds in total.
        In each round, the i-th participant is paired as a client with each of the remaining n-1 servers.

        Args:
            n (int): the number of participants.

        Returns:
            list: the generated config list, each item in the list is a str like "0,1;2,3".
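
        Example:
            With n=3, each round pairs participant i as the client against the
            other two participants as servers (pairs are formatted as 'server,client'):
            ['1,0;2,0', '0,1;2,1', '0,2;1,2']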
        """
        config = []
        for client in range(n):
            row = []
            for server in range(n):
                if server != client:
                    pair = '{},{}'.format(server, client)
                    row.append(pair)
            row = ';'.join(row)
            config.append(row)
        return config

    def __many_to_one(self, n):
        """Generate many-to-one pattern config.

        There are n rounds in total.
        In each round, the i-th participant is paired as a server with each of the remaining n-1 clients.

        Args:
            n (int): the number of participants.

        Returns:
            list: the generated config list, each item in the list is a str like "0,1;2,3".
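
        Example:
            With n=3, each round pairs participant i as the server against the
            other two participants as clients (pairs are formatted as 'server,client'):
            ['0,1;0,2', '1,0;1,2', '2,0;2,1']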
        """
        config = []
        for server in range(n):
            row = []
            for client in range(n):
                if server != client:
                    pair = '{},{}'.format(server, client)
                    row.append(pair)
            row = ';'.join(row)
            config.append(row)
        return config

    def __fully_one_to_one(self, n):
        """Generate one-to-one pattern config.

        One-to-one means that each participant plays every other participant exactly once.
        The algorithm follows the circle method for round-robin tournaments, see
        https://en.wikipedia.org/wiki/Round-robin_tournament.
        If n is even, there are n-1 rounds in total, each with n/2 pairs of unique participants.
        If n is odd, there are n rounds, each with (n-1)/2 pairs, and one participant sitting out in each round.
        In each round, pair participants from both ends toward the middle as (begin, end), (begin+1, end-1), ...
        Then all participants except the first shift left one position, and the pairing step repeats.

        Args:
            n (int): the number of participants.

        Returns:
            list: the generated config list, each item in the list is a str like "0,1;2,3".
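
        Example:
            With n=4, the circle method yields 3 rounds covering all 6 unique pairs:
            ['0,3;1,2', '0,1;2,3', '0,2;3,1']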
        """
        config = []
        candidates = list(range(n))
        # Add a fake participant if n is odd
        if n % 2 == 1:
            candidates.append(-1)
        count = len(candidates)
        non_moving = [candidates[0]]
        for _ in range(count - 1):
            pairs = [
                '{},{}'.format(candidates[i], candidates[count - i - 1]) for i in range(0, count // 2)
                if candidates[i] != -1 and candidates[count - i - 1] != -1
            ]
            row = ';'.join(pairs)
            config.append(row)
            robin = candidates[2:] + candidates[1:2]
            candidates = non_moving + robin
        return config

    def gen_traffic_pattern(self, n, mode, config_file_path):
        """Generate traffic pattern into config file.

        Args:
            n (int): the number of nodes.
            mode (str): the traffic mode, including 'one-to-one', 'one-to-many', 'many-to-one'.
            config_file_path (str): the path of config file to generate.
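
        Example:
            gen_traffic_pattern(3, 'many-to-one', 'config.txt') writes one round per line:
                0,1;0,2
                1,0;1,2
                2,0;2,1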
        """
        config = []
        if mode == 'one-to-many':
            config = self.__one_to_many(n)
        elif mode == 'many-to-one':
            config = self.__many_to_one(n)
        elif mode == 'one-to-one':
            config = self.__fully_one_to_one(n)
        with open(config_file_path, 'w') as f:
            for line in config:
                f.write(line + '\n')

    def __prepare_config(self):
        """Prepare and read config file.

        Returns:
            bool: True if the config is not empty and valid, False otherwise.
        """
        try:
            # Read the hostfile
            if not self._args.hostfile:
                self._args.hostfile = os.path.join(os.environ.get('SB_WORKSPACE', '.'), 'hostfile')
            with open(self._args.hostfile, 'r') as f:
                hosts = f.readlines()
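            # The hostfile lists one hostname per line; the line order defines
            # the integer indices referenced by the config file.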
            # Generate the config file if not defined
            if self._args.config is None:
                self.gen_traffic_pattern(len(hosts), self._args.pattern, self.__config_path)
            # Use the config file defined in args
            else:
                self.__config_path = self._args.config
            # Read the config file and check if it's empty and valid
            with open(self.__config_path, 'r') as f:
                lines = f.readlines()
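            # Each config line is one round of 'server,client' index pairs separated
            # by ';', e.g., '0,3;1,2' pairs host 0 with host 3 and host 1 with host 2.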
            for line in lines:
                pairs = line.strip().strip(';').split(';')
                # Check format of config
                for pair in pairs:
                    pair = pair.split(',')
                    if len(pair) != 2:
                        return False
                    pair[0] = int(pair[0])
                    pair[1] = int(pair[1])
                    self.__config.append('{}_{}'.format(hosts[pair[0]].strip(), hosts[pair[1]].strip()))
        except Exception as e:
            self._result.set_return_code(ReturnCode.INVALID_ARGUMENT)
            logger.error('Failed to generate and check config - benchmark: {}, message: {}.'.format(self._name, str(e)))
            return False
        if len(self.__config) == 0:
            self._result.set_return_code(ReturnCode.INVALID_ARGUMENT)
            logger.error('No valid config - benchmark: {}.'.format(self._name))
            return False
        return True

    def __prepare_general_ib_command_params(self):
        """Prepare general params for ib commands.

        Returns:
            str: the ib command params if the arguments are valid, otherwise False.
        """
        # Format the ib command type
        self._args.command = self._args.command.lower()
        # Add message size for ib command
        msg_size = f'-s {self._args.msg_size}' if self._args.msg_size > 0 else '-a'
        # Add GPUDirect for ib command
        gpu_dev = ''
        if self._args.gpu_dev is not None:
            gpu = GPU()
            if gpu.vendor == 'nvidia':
                gpu_dev = f'--use_cuda={self._args.gpu_dev}'
            elif gpu.vendor == 'amd':
                gpu_dev = f'--use_rocm={self._args.gpu_dev}'
            else:
                self._result.set_return_code(ReturnCode.INVALID_ARGUMENT)
                logger.error('Unsupported GPU vendor or no GPU found - benchmark: {}.'.format(self._name))
                return False
        # Generate ib command params
        command_params = f'-F -n {self._args.iters} -d {self._args.ib_dev} {msg_size} {gpu_dev}'
        command_params = f'{command_params.strip()} --report_gbits'
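        # With the default arguments this yields, e.g.:
        #   -F -n 5000 -d mlx5_0 -s 8388608 --report_gbits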
        return command_params

    def _preprocess(self):
        """Preprocess/preparation operations before the benchmarking.

        Return:
            True if _preprocess() succeed.
        """
        if not super()._preprocess():
            return False

        # Generate and check config
        if not self.__prepare_config():
            return False

        # Prepare general params for ib commands
        command_params = self.__prepare_general_ib_command_params()
        if not command_params:
            return False
        # Generate commands
        if self._args.command not in self.__support_ib_commands:
            self._result.set_return_code(ReturnCode.INVALID_ARGUMENT)
            logger.error(
                'Unsupported ib command - benchmark: {}, command: {}, expected: {}.'.format(
                    self._name, self._args.command, ' '.join(self.__support_ib_commands)
                )
            )
            return False
        else:
            ib_command_prefix = f'{os.path.join(self._args.bin_dir, self._args.command)} {command_params}'
            if self._args.numa_dev is not None:
                ib_command_prefix = f'numactl -N {self._args.numa_dev} {ib_command_prefix}'
            if 'bw' in self._args.command and self._args.bidirectional:
                ib_command_prefix += ' -b'

            command = os.path.join(self._args.bin_dir, self._bin_name)
            command += f" --cmd_prefix '{ib_command_prefix}'"
            command += f' --timeout {self._args.timeout} ' + \
                f'--hostfile {self._args.hostfile} --input_config {self.__config_path}'
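            # The assembled command looks like (illustrative, with default arguments):
            #   <bin_dir>/ib_validation --cmd_prefix '<bin_dir>/ib_write_bw -F -n 5000
            #   -d mlx5_0 -s 8388608 --report_gbits' --timeout 120
            #   --hostfile <hostfile> --input_config <cwd>/config.txt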
            self._commands.append(command)

        return True

    def _process_raw_result(self, cmd_idx, raw_output):    # noqa: C901
        """Function to parse raw results and save the summarized results.

        self._result.add_raw_data() and self._result.add_result() need to be called to save the results.

        Args:
            cmd_idx (int): the index of command corresponding with the raw_output.
            raw_output (str): raw output string of the micro-benchmark.

        Return:
            True if the raw output string is valid and result can be extracted.
        """
        self._result.add_raw_data('raw_output_' + self._args.command, raw_output, self._args.log_raw_data)

        # If it's invoked by MPI and rank is not 0, no result is expected
        if os.getenv('OMPI_COMM_WORLD_RANK'):
            rank = int(os.getenv('OMPI_COMM_WORLD_RANK'))
            if rank > 0:
                return True

        valid = False
        content = raw_output.splitlines()
        config_index = 0
        command = self._args.command
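        # The parsing below assumes the following raw output layout (illustrative):
        # everything before a line containing 'results' is ignored; after it, each
        # line is one round, pair results within a line are comma-separated, and
        # per-rank values within a pair are space-separated, e.g.:
        #   results
        #   23.5 23.1,24.0 23.8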
        try:
            result_index = -1
            for index, line in enumerate(content):
                if 'results' in line:
                    result_index = index + 1
                    break
            if result_index == -1:
                valid = False
            else:
                content = content[result_index:]
                for line_index, line in enumerate(content):
                    line_result = list(filter(None, line.strip().split(',')))
                    for pair_index, pair_result in enumerate(line_result):
                        rank_results = list(filter(None, pair_result.strip().split(' ')))
                        for rank_index, rank_result in enumerate(rank_results):
                            metric = f'{command}_{line_index}_{pair_index}:{self.__config[config_index]}:{rank_index}'
                            value = float(rank_result)
                            # Convert valid bandwidth results from Gb/s to GB/s
                            if 'bw' in command and value >= 0.0:
                                value = value / 8.0
                            self._result.add_result(metric, value)
                            valid = True
                        config_index += 1
        except Exception:
            valid = False
        if valid is False or config_index != len(self.__config):
            logger.error(
                'The result format is invalid - round: {}, benchmark: {}, raw output: {}.'.format(
                    self._curr_run_index, self._name, raw_output
                )
            )
            return False

        return True


BenchmarkRegistry.register_benchmark('ib-traffic', IBBenchmark)
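
# A minimal usage sketch (assumptions: the ib_validation binary is built, the
# benchmark is launched under MPI across the hosts in the hostfile, and the
# standard BenchmarkRegistry helpers are used):
#
#   context = BenchmarkRegistry.create_benchmark_context(
#       'ib-traffic', parameters='--ib_dev mlx5_0 --msg_size 8388608 --pattern one-to-one'
#   )
#   benchmark = BenchmarkRegistry.launch_benchmark(context)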