# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Module of the model-benchmark base class."""

import math
import time
from abc import abstractmethod

from superbench.common.utils import logger
from superbench.benchmarks import Precision, ModelAction, DistributedImpl, DistributedBackend, BenchmarkType, ReturnCode
from superbench.benchmarks.base import Benchmark
from superbench.benchmarks.context import Enum


class Optimizer(Enum):
    """The Enum class representing different optimizers."""
    SGD = 'sgd'
    ADAM = 'adam'
    ADAMW = 'adamw'


class ModelBenchmark(Benchmark):
    """The base class of E2E model benchmarks."""
    def __init__(self, name, parameters=''):
        """Constructor.

        Args:
            name (str): benchmark name.
            parameters (str): benchmark parameters.
        """
        super().__init__(name, parameters)

        self._benchmark_type = BenchmarkType.MODEL
        self._world_size = 1
        self._local_rank = None
        self._dataset = None
        self._dataloader = None
        self._model = None
        self._optimizer_type = None
        self._optimizer = None
        self._loss_fn = None
        self._target = None
        self._supported_precision = []
        self._gpu_available = None

    def add_parser_arguments(self):
        """Add the specified arguments."""
        super().add_parser_arguments()

        self._parser.add_argument(
            '--num_warmup',
            type=int,
            default=64,
            required=False,
            help='The number of warmup steps.',
        )
        self._parser.add_argument(
            '--num_steps',
            type=int,
            default=2048,
            required=False,
            help='The number of test steps.',
        )
        self._parser.add_argument(
            '--sample_count',
            type=int,
            default=1024,
            required=False,
            help='The number of data samples in the dataset.',
        )
        self._parser.add_argument(
            '--batch_size',
            type=int,
            default=32,
            required=False,
            help='The number of samples in each batch.',
        )
        self._parser.add_argument(
            '--precision',
            type=Precision,
            default=[Precision.FLOAT32, Precision.FLOAT16],
            nargs='+',
            required=False,
            help='Model precision. E.g. {}.'.format(' '.join(Precision.get_values())),
        )
        self._parser.add_argument(
            '--model_action',
            type=ModelAction,
            default=[ModelAction.TRAIN],
            nargs='+',
            required=False,
            help='Benchmark model actions. E.g. {}.'.format(' '.join(ModelAction.get_values())),
        )
        self._parser.add_argument(
            '--distributed_impl',
            type=DistributedImpl,
            default=None,
            required=False,
            help='Distributed implementations. E.g. {}.'.format(' '.join(DistributedImpl.get_values())),
        )

        self._parser.add_argument(
            '--distributed_backend',
            type=DistributedBackend,
            default=None,
            required=False,
            help='Distributed backends. E.g. {}.'.format(' '.join(DistributedBackend.get_values())),
        )

        self._parser.add_argument(
            '--no_gpu',
            action='store_true',
            default=False,
            help='Disable GPU training.',
        )

        self._parser.add_argument(
            '--pin_memory',
            action='store_true',
            default=False,
            help='Enable option to pin memory in data loader.',
        )
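
    # As an illustration, a subclass constructed with the parameters string
    # '--num_warmup 16 --num_steps 128 --batch_size 16 --precision float16'
    # (hypothetical values) would have these options parsed into self._args.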

    @abstractmethod
    def _judge_gpu_availability(self):
        """Judge GPUs' availability according to arguments and running environment."""
        pass

    @abstractmethod
    def _init_distributed_setting(self):
        """Initialize the distributed library and bind the worker to GPU.

        Return:
            True if distributed library is initialized successfully.
        """
        pass

    @abstractmethod
    def _generate_dataset(self):
        """Generate dataset for benchmarking according to shape info.

        Return:
            True if dataset is created successfully.
        """
        pass

    @abstractmethod
    def _init_dataloader(self):
        """Initialize the dataloader.

        Return:
            True if dataloader is created successfully.
        """
        pass

    def _preprocess(self):
        """Preprocess/preparation operations before the benchmarking.

        Return:
            True if _preprocess() succeeds.
        """
        if not super()._preprocess():
            return False

        self._judge_gpu_availability()
        logger.info(
            'Model placement - model: {}, GPU availability: {}, pin memory: {}.'.format(
                self._name, self._gpu_available, self._args.pin_memory
            )
        )

        if not self._init_distributed_setting():
            self._result.set_return_code(ReturnCode.DISTRIBUTED_SETTING_INIT_FAILURE)
            return False

        # Set sample_count aligned with batch_size.
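        # For example, sample_count=1000 with batch_size=32 is rounded up to ceil(1000 / 32) * 32 = 1024.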
        self._args.sample_count = math.ceil(self._args.sample_count / self._args.batch_size) * self._args.batch_size

        if not self._generate_dataset():
            self._result.set_return_code(ReturnCode.DATASET_GENERATION_FAILURE)
            return False

        if not self._init_dataloader():
            self._result.set_return_code(ReturnCode.DATALOADER_INIT_FAILURE)
            return False

        return True

    @abstractmethod
    def _create_optimizer(self):
        """Create the optimizer instance used for training and wrap it with the distributed library if needed.

        Return:
            True if optimizer instance is created successfully.
        """
        pass

    @abstractmethod
    def _create_model(self, precision):
        """Construct the model for benchmarking.

        Args:
            precision (Precision): precision of model and input data, such as float32, float16.

        Return:
            True if model is created successfully.
        """
        pass

    def __train(self, precision):
        """Launch the training benchmark.

        Args:
            precision (Precision): precision of model and input data, such as float32, float16.

        Return:
            True if step_times list is not empty.
        """
        if not self._create_model(precision):
            self._result.set_return_code(ReturnCode.MODEL_CREATION_FAILURE)
            return False

        if not self._create_optimizer():
            self._result.set_return_code(ReturnCode.OPTIMIZER_CREATION_FAILURE)
            return False

        # The unit of step time should be millisecond.
        step_times = self._train_step(precision)
        if not self.__process_model_result(ModelAction.TRAIN, precision, step_times):
            self._result.set_return_code(ReturnCode.INVALID_BENCHMARK_RESULT)
            return False

        logger.info(
            'Average train time - round: {}, model: {}, precision: {}, step time: {:.6f} ms.'.format(
                self._curr_run_index, self._name, precision,
                sum(step_times) / len(step_times)
            )
        )

        return True

    def __inference(self, precision):
        """Launch the inference benchmark.

        Args:
            precision (Precision): precision of model and input data, such as float32, float16.

        Return:
            True if step_times list is not empty.
        """
        if not self._create_model(precision):
            self._result.set_return_code(ReturnCode.MODEL_CREATION_FAILURE)
            return False
        # The unit of step time should be millisecond.
        step_times = self._inference_step(precision)
        if not self.__process_model_result(ModelAction.INFERENCE, precision, step_times):
            self._result.set_return_code(ReturnCode.INVALID_BENCHMARK_RESULT)
            return False

        logger.info(
            'Average inference time - round: {}, model: {}, precision: {}, step time: {:.6f} ms.'.format(
                self._curr_run_index, self._name, precision,
                sum(step_times) / len(step_times)
            )
        )

        return True

    @abstractmethod
    def _train_step(self, precision):
        """Define the training process.

        Args:
            precision (Precision): precision of model and input data, such as float32, float16.

        Return:
            The step-time list of every training step.
        """
        pass

    @abstractmethod
    def _inference_step(self, precision):
        """Define the inference process.

        Args:
            precision (Precision): precision of model and input data,
              such as float32, float16.

        Return:
            The latency list of every inference operation.
        """
        pass

    def _benchmark(self):
        """Implementation for benchmarking.

        Return:
            True if the benchmark runs successfully.
        """
        precision_need_to_run = list()
        for precision in self._args.precision:
            # Check if the precision is supported or not.
            if precision not in self._supported_precision:
                logger.warning(
                    'Cannot run with specified precision - model: {}, supported precision: {}, specified precision: {}.'.
                    format(self._name, ' '.join([p.value for p in self._supported_precision]), precision)
                )
            else:
                precision_need_to_run.append(precision)

        if len(precision_need_to_run) == 0:
            self._result.set_return_code(ReturnCode.NO_SUPPORTED_PRECISION)
            return False

        for precision in precision_need_to_run:
            for model_action in self._args.model_action:
                self._sub_benchmark_start_time = time.time()
                if model_action == ModelAction.TRAIN:
                    if not self.__train(precision):
                        return False
                elif model_action == ModelAction.INFERENCE:
                    if not self.__inference(precision):
                        return False
                else:
                    logger.warning(
                        'Model action has no implementation yet - model: {}, model_action: {}'.format(
                            self._name, model_action
                        )
                    )

        return True

    def _is_finished(self, curr_step, curr_time):
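        """Judge whether the benchmark should stop.

        The benchmark finishes when the configured duration is exceeded or when
        all warmup and test steps (num_warmup + num_steps) have been executed.

        Args:
            curr_step (int): the number of steps that have been executed.
            curr_time (float): the current timestamp in seconds, as returned by time.time().

        Return:
            True if the sub-benchmark is finished.
        """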
        total_steps = self._args.num_warmup + self._args.num_steps

        if (
            (self._args.duration > 0 and (curr_time - self._sub_benchmark_start_time) >= self._args.duration)
            or (total_steps > 0 and curr_step >= total_steps)
        ):
            return True

        return False

    def __process_model_result(self, model_action, precision, step_times):
        """Function to process raw results and save the summarized results.

        Args:
            model_action (ModelAction): train or inference.
            precision (Precision): precision of model and input data, such as float32, float16.
            step_times (list): The step time list of every training/inference step, unit is millisecond.

        Return:
            True if step_times list is not empty.
        """
        if len(step_times) == 0:
            logger.error(
                'Step time list is empty - round: {}, model: {}, model_action: {}, precision: {}.'.format(
                    self._curr_run_index, self._name, model_action, precision
                )
            )
            return False

        metric = 'steptime_{}_{}'.format(model_action, precision)
        self._result.add_raw_data(metric, step_times)
        avg = sum(step_times) / len(step_times)
        self._result.add_result(metric, avg)

        # The unit of step time is millisecond, use it to calculate the throughput with the unit samples/sec.
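        # For example, a 20 ms step with batch_size=32 yields 1000 / 20 * 32 = 1600 samples/sec.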
        millisecond_per_second = 1000
        throughput = [millisecond_per_second / step_time * self._args.batch_size for step_time in step_times]
        metric = 'throughput_{}_{}'.format(model_action, precision)
        self._result.add_raw_data(metric, throughput)
        avg = sum(throughput) / len(throughput)
        self._result.add_result(metric, avg)

        return True

    @abstractmethod
    def _cal_params_count(self):
        """Calculate the parameters scale of the model.

        Return:
            The count of trainable parameters.
        """
        pass

    def print_env_info(self):
        """Print environments or dependencies information."""
        # TODO: will implement it when add real benchmarks in the future.
        pass
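
# A minimal sketch (not part of this module) of how a framework-specific benchmark
# might subclass ModelBenchmark; the class name, torch calls, and tensor shape below
# are illustrative assumptions, not the repository's real subclasses.
#
# import torch
#
# class RandomImageModelBenchmark(ModelBenchmark):
#     def _judge_gpu_availability(self):
#         # Respect --no_gpu and fall back to CPU when CUDA is unavailable.
#         self._gpu_available = not self._args.no_gpu and torch.cuda.is_available()
#
#     def _generate_dataset(self):
#         # Synthesize sample_count random images matching the model's input shape.
#         self._dataset = torch.rand(self._args.sample_count, 3, 224, 224)
#         return True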