# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Module of the base class."""

import argparse
import numbers
from datetime import datetime
from operator import attrgetter
from abc import ABC, abstractmethod

import numpy as np

from superbench.common.utils import logger
from superbench.benchmarks import BenchmarkType, ReturnCode
from superbench.benchmarks.result import BenchmarkResult


class SortedMetavarTypeHelpFormatter(argparse.MetavarTypeHelpFormatter):
    """Custom HelpFormatter class for argparse which sorts option strings."""
    def add_arguments(self, actions):
        """Sort option strings before original add_arguments.

        Args:
            actions (argparse.Action): Argument parser actions.
        """
        super(SortedMetavarTypeHelpFormatter, self).add_arguments(sorted(actions, key=attrgetter('option_strings')))


class Benchmark(ABC):
    """The base class of all benchmarks."""
    def __init__(self, name, parameters=''):
        """Constructor.

        Args:
            name (str): benchmark name.
            parameters (str): benchmark parameters.
        """
        self._name = name
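        # Split the raw parameter string into an argv-style list, dropping empty tokens.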
        self._argv = list(filter(None, parameters.split(' '))) if parameters is not None else list()
        self._benchmark_type = None
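        # Each benchmark owns a private argument parser with help and abbreviation disabled,
        # so only explicitly registered options are recognized.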
        self._parser = argparse.ArgumentParser(
            add_help=False,
            usage=argparse.SUPPRESS,
            allow_abbrev=False,
            formatter_class=SortedMetavarTypeHelpFormatter,
        )
        self._args = None
        self._curr_run_index = 0
        self._result = None

    def add_parser_arguments(self):
        """Add the specified arguments."""
        self._parser.add_argument(
            '--run_count',
            type=int,
            default=1,
            required=False,
            help='The run count of the benchmark.',
        )
        self._parser.add_argument(
            '--duration',
            type=int,
            default=0,
            required=False,
            help='The elapsed time of the benchmark in seconds.',
        )

    def get_configurable_settings(self):
        """Get all the configurable settings.

        Return:
            All configurable settings in raw string.
        """
        return self._parser.format_help().strip()

    def parse_args(self, ignore_invalid=False):
        """Parse the arguments.

        Return:
            ret (bool): whether parse succeed or not.
            args (argparse.Namespace): parsed arguments.
            unknown (list): unknown arguments.
        """
        try:
            args, unknown = self._parser.parse_known_args(self._argv)
        except BaseException as e:
            if ignore_invalid:
                logger.info('Missing or invalid parameters, will ignore the error and skip the args checking.')
                return True, None, []
            else:
                logger.error('Invalid argument - benchmark: {}, message: {}.'.format(self._name, str(e)))
                return False, None, []

        ret = True
        if len(unknown) > 0:
            logger.error(
                'Unknown arguments - benchmark: {}, unknown arguments: {}'.format(self._name, ' '.join(unknown))
            )
            ret = False

        return ret, args, unknown

    def _preprocess(self):
        """Preprocess/preparation operations before the benchmarking.

        Return:
            True if _preprocess() succeeds.
        """
        self.add_parser_arguments()
        ret, self._args, unknown = self.parse_args()

        if not ret:
            self._result = BenchmarkResult(self._name, self._benchmark_type, ReturnCode.INVALID_ARGUMENT)
            return False

        self._result = BenchmarkResult(
            self._name, self._benchmark_type, ReturnCode.SUCCESS, run_count=self._args.run_count
        )

        if not isinstance(self._benchmark_type, BenchmarkType):
            logger.error(
                'Invalid benchmark type - benchmark: {}, type: {}'.format(self._name, type(self._benchmark_type))
            )
            self._result.set_return_code(ReturnCode.INVALID_BENCHMARK_TYPE)
            return False

        return True

    def _postprocess(self):
        """Postprocess/cleanup operations after the benchmarking.

        Return:
            True if _postprocess() succeeds.
        """
        return True

    @abstractmethod
    def _benchmark(self):
        """Implementation for benchmarking."""
        pass

    def run(self):
        """Function to launch the benchmarking.

        Return:
            True if the benchmark runs successfully.
        """
        ret = True
        try:
            ret &= self._preprocess()
            if ret:
                self._start_time = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
                for self._curr_run_index in range(self._args.run_count):
                    ret &= self._benchmark()
                self._end_time = datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')
                self._result.set_timestamp(self._start_time, self._end_time)

                if ret:
                    ret &= self.__check_result_format()
        except BaseException as e:
            self._result.set_return_code(ReturnCode.RUNTIME_EXCEPTION_ERROR)
            logger.error('Run benchmark failed - benchmark: {}, message: {}'.format(self._name, str(e)))
        finally:
            ret &= self._postprocess()

        return ret

    def __check_result_format(self):
        """Check the validation of result object.

        Return:
            True if the result is valid.
        """
        if (not self.__check_result_type()) or (not self.__check_summarized_result()) or (not self.__check_raw_data()):
            self._result.set_return_code(ReturnCode.INVALID_BENCHMARK_RESULT)
            return False

        return True

    def __check_result_type(self):
        """Check the type of result object.

        Return:
            True if the result is instance of BenchmarkResult.
        """
        if not isinstance(self._result, BenchmarkResult):
            logger.error(
                'Invalid benchmark result type - benchmark: {}, type: {}'.format(self._name, type(self._result))
            )
            return False

        return True

    def __is_list_type(self, data, t):
        """Check whether the data is a list whose items are all of type t."""
        return isinstance(data, list) and all(isinstance(item, t) for item in data)

    def __is_list_list_type(self, data, t):
        """Check whether the data is a list of lists whose inner items are all of type t."""
        return self.__is_list_type(data, list) and all(isinstance(value, t) for item in data for value in item)

    def __check_summarized_result(self):
        """Check the validation of summary result.

        Return:
            True if the summary result is instance of List[Number].
        """
        for metric in self._result.result:
            if not self.__is_list_type(self._result.result[metric], numbers.Number):
                logger.error(
                    'Invalid summarized result - benchmark: {}, metric: {}, result: {}.'.format(
                        self._name, metric, self._result.result[metric]
                    )
                )
                return False

        return True

    def __check_raw_data(self):
        """Check the validation of raw data.

        Return:
            True if the raw data is:
              instance of List[List[Number]] for BenchmarkType.MODEL.
              instance of List[str] for BenchmarkType.DOCKER.
              instance of List[List[Number]] or List[str] for BenchmarkType.MICRO.
        """
        for metric in self._result.raw_data:
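            # The expected raw data layout depends on the benchmark type, checked per type below.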
            is_valid = True
            if self._benchmark_type == BenchmarkType.MODEL:
                is_valid = self.__is_list_list_type(self._result.raw_data[metric], numbers.Number)
            elif self._benchmark_type == BenchmarkType.DOCKER:
                is_valid = self.__is_list_type(self._result.raw_data[metric], str)
            elif self._benchmark_type == BenchmarkType.MICRO:
                is_valid = self.__is_list_type(self._result.raw_data[metric], str) or self.__is_list_list_type(
                    self._result.raw_data[metric], numbers.Number
                )
            if not is_valid:
                logger.error(
                    'Invalid raw data type - benchmark: {}, metric: {}, raw data: {}.'.format(
                        self._name, metric, self._result.raw_data[metric]
                    )
                )
                return False

        return True

    def _process_percentile_result(self, metric, result, reduce_type=None):
        """Function to process the percentile results.

        Args:
            metric (str): metric name which is the key.
            result (List[numbers.Number]): numerical result.
            reduce_type (ReduceType): The type of reduce function.
        """
        if len(result) > 0:
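            # Each percentile is stored as a separate metric named '<metric>_<percentile>'.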
            percentile_list = ['50', '90', '95', '99', '99.9']
            for percentile in percentile_list:
                self._result.add_result(
                    '{}_{}'.format(metric, percentile),
                    np.percentile(result, float(percentile), interpolation='nearest'), reduce_type
                )

    def print_env_info(self):
        """Print environments or dependencies information."""
        # TODO: will implement it when adding real benchmarks in the future.
        pass

    @property
    def name(self):
        """Decoration function to access benchmark name."""
        return self._result.name

    @property
    def type(self):
        """Decoration function to access benchmark type."""
        return self._result.type

    @property
    def run_count(self):
        """Decoration function to access benchmark run_count."""
        return self._result.run_count

    @property
    def return_code(self):
        """Decoration function to access benchmark return_code."""
        return self._result.return_code

    @property
    def start_time(self):
        """Decoration function to access benchmark start_time."""
        return self._result.start_time

    @property
    def end_time(self):
        """Decoration function to access benchmark end_time."""
        return self._result.end_time

    @property
    def raw_data(self):
        """Decoration function to access benchmark raw_data."""
        return self._result.raw_data

    @property
    def result(self):
        """Decoration function to access benchmark result."""
        return self._result.result

    @property
    def serialized_result(self):
        """Decoration function to access benchmark result."""
        return self._result.to_string()

    @property
    def default_metric_count(self):
        """Decoration function to get the count of default metrics."""
        return self._result.default_metric_count