model_base.py
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Module of the model-benchmark base class."""

import math
import time
from abc import abstractmethod

from superbench.common.utils import logger
from superbench.benchmarks import Precision, ModelAction, BenchmarkType, ReturnCode
from superbench.benchmarks.base import Benchmark
from superbench.benchmarks.context import Enum


class Optimizer(Enum):
    """The Enum class representing different optimizers."""
    SGD = 'sgd'
    ADAM = 'adam'
    ADAMW = 'adamw'


class DistributedImpl(Enum):
    """The Enum class representing different distributed implementations."""
    DDP = 'ddp'
    MIRRORED = 'mirrored'
    MW_MIRRORED = 'multiworkermirrored'
    PS = 'parameterserver'
    HOROVOD = 'horovod'


class DistributedBackend(Enum):
    """The Enum class representing different distributed backends."""
    NCCL = 'nccl'
    MPI = 'mpi'
    GLOO = 'gloo'


class ModelBenchmark(Benchmark):
    """The base class of E2E model benchmarks."""
    def __init__(self, name, parameters=''):
        """Constructor.

        Args:
            name (str): benchmark name.
            parameters (str): benchmark parameters.
        """
        super().__init__(name, parameters)

        self._benchmark_type = BenchmarkType.MODEL
        self._world_size = 1
        self._local_rank = None
        self._dataset = None
        self._dataloader = None
        self._model = None
        self._optimizer_type = None
        self._optimizer = None
        self._loss_fn = None
        self._target = None
        self._supported_precision = []
        self._gpu_available = None

    def add_parser_arguments(self):
        """Add the specified arguments."""
        super().add_parser_arguments()

        self._parser.add_argument(
            '--num_warmup',
            type=int,
            default=64,
            required=False,
            help='The number of warmup steps.',
        )
        self._parser.add_argument(
            '--num_steps',
            type=int,
            default=2048,
            required=False,
            help='The number of test steps.',
        )
        self._parser.add_argument(
            '--sample_count',
            type=int,
            default=128,
            required=False,
            help='The number of data samples in the dataset.',
        )
        self._parser.add_argument(
            '--batch_size',
            type=int,
            default=32,
            required=False,
            help='The number of samples in each batch.',
        )
        self._parser.add_argument(
            '--precision',
            type=Precision,
            default=[Precision.FLOAT32, Precision.FLOAT16],
            nargs='+',
            required=False,
            help='Model precision. E.g. {}.'.format(' '.join(Precision.get_values())),
        )
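        # For example, passing `--precision float32 float16` on the command line makes
        # argparse apply Precision() to each token, yielding the list
        # [Precision.FLOAT32, Precision.FLOAT16].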
        self._parser.add_argument(
            '--model_action',
            type=ModelAction,
            default=[ModelAction.TRAIN],
            nargs='+',
            required=False,
            help='Benchmark model process. E.g. {}.'.format(' '.join(ModelAction.get_values())),
        )
        self._parser.add_argument(
            '--distributed_impl',
            type=DistributedImpl,
            default=None,
            required=False,
            help='Distributed implementations. E.g. {}.'.format(' '.join(DistributedImpl.get_values())),
        )

        self._parser.add_argument(
            '--distributed_backend',
            type=DistributedBackend,
            default=None,
            required=False,
            help='Distributed backends. E.g. {}.'.format(' '.join(DistributedBackend.get_values())),
        )

        self._parser.add_argument(
            '--no_gpu',
            action='store_true',
            default=False,
            help='Disable GPU training.',
        )

    @abstractmethod
    def _judge_gpu_availability(self):
        """Judge GPUs' availability according to arguments and running environment."""
        pass

    @abstractmethod
    def _init_distributed_setting(self):
        """Initialize the distributed library and bind the worker to GPU.

        Return:
            True if distributed library is initialized successfully.
        """
        pass

    @abstractmethod
    def _generate_dataset(self):
        """Generate dataset for benchmarking according to shape info.

        Return:
            True if dataset is created successfully.
        """
        pass

    @abstractmethod
    def _init_dataloader(self):
        """Initialize the dataloader.

        Return:
            True if dataloader is created successfully.
        """
        pass

    def _preprocess(self):
        """Preprocess/preparation operations before the benchmarking.

        Return:
            True if _preprocess() succeeds.
        """
        if not super()._preprocess():
            return False

        self._judge_gpu_availability()
        logger.info('GPU availability - model: {}, availability: {}.'.format(self._name, self._gpu_available))

        if not self._init_distributed_setting():
            self._result.set_return_code(ReturnCode.DISTRIBUTED_SETTING_INIT_FAILURE)
            return False

        # Set sample_count aligned with batch_size.
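        # For example, sample_count=100 with batch_size=32 is rounded up to
        # math.ceil(100 / 32) * 32 = 128, so the dataset holds a whole number of batches.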
        self._args.sample_count = math.ceil(self._args.sample_count / self._args.batch_size) * self._args.batch_size

        if not self._generate_dataset():
            self._result.set_return_code(ReturnCode.DATASET_GENERATION_FAILURE)
            return False

        if not self._init_dataloader():
            self._result.set_return_code(ReturnCode.DATALOADER_INIT_FAILURE)
            return False

        return True

    @abstractmethod
    def _create_optimizer(self):
        """Create the optimzier instance used for training and wrap with distributed library if need.

        Return:
            True if optimizer instance is created successfully.
        """
        pass

    @abstractmethod
    def _create_model(self, precision):
        """Construct the model for benchmarking.

        Args:
            precision (Precision): precision of model and input data, such as float32, float16.
        """
        pass

    def __train(self, precision):
        """Launch the training benchmark.

        Args:
            precision (Precision): precision of model and input data, such as float32, float16.

        Return:
            True if step_times list is not empty.
        """
        if not self._create_model(precision):
            self._result.set_return_code(ReturnCode.MODEL_CREATION_FAILURE)
            return False

        if not self._create_optimizer():
            self._result.set_return_code(ReturnCode.OPTIMIZER_CREATION_FAILURE)
            return False

        # The unit of step time should be millisecond.
        step_times = self._train_step(precision)
        if len(step_times) == 0:
            logger.error(
                'Step time list for training is empty - round: {}, model: {}, precision: {}.'.format(
                    self._curr_run_index, self._name, precision
                )
            )
            return False

        average_time = sum(step_times) / len(step_times)
        logger.info(
            'Average train time - round: {}, model: {}, precision: {}, step time: {:.6f} ms.'.format(
                self._curr_run_index, self._name, precision, average_time
            )
        )

        self.__process_model_result(ModelAction.TRAIN, precision, step_times)
        return True

    def __inference(self, precision):
        """Launch the inference benchmark.

        Args:
            precision (Precision): precision of model and input data, such as float32, float16.

        Return:
            True if step_times list is not empty.
        """
        if not self._create_model(precision):
            self._result.set_return_code(ReturnCode.MODEL_CREATION_FAILURE)
            return False
        # The unit of step time should be millisecond.
        step_times = self._inference_step(precision)
        if len(step_times) == 0:
            logger.error(
                'Step time list for inference is empty - round: {}, model: {}, precision: {}.'.format(
                    self._curr_run_index, self._name, precision
                )
            )
            return False

        average_time = sum(step_times) / len(step_times)
        logger.info(
            'Average inference time - round: {}, model: {}, precision: {}, step time: {:.6f} ms.'.format(
                self._curr_run_index, self._name, precision, average_time
            )
        )

        self.__process_model_result(ModelAction.INFERENCE, precision, step_times)
        return True

    @abstractmethod
    def _train_step(self, precision):
        """Define the training process.

        Args:
            precision (Precision): precision of model and input data, such as float32, float16.

        Return:
            The step-time list of every training step.
        """
        pass

    @abstractmethod
    def _inference_step(self, precision):
        """Define the inference process.

        Args:
            precision (Precision): precision of model and input data,
              such as float32, float16.

        Return:
            The latency list of every inference operation.
        """
        pass

    def _benchmark(self):
        """Implementation for benchmarking.

        Return:
            True if the benchmark runs successfully.
        """
        precision_need_to_run = list()
        for precision in self._args.precision:
            # Check if the precision is supported or not.
            if precision not in self._supported_precision:
                logger.warning(
                    'Cannot run with the specified precision - model: {}, supported precision: {}, specified precision: {}.'.
                    format(self._name, ' '.join([p.value for p in self._supported_precision]), precision)
                )
            else:
                precision_need_to_run.append(precision)

        if len(precision_need_to_run) == 0:
            self._result.set_return_code(ReturnCode.NO_SUPPORTED_PRECISION)
            return False

        for precision in precision_need_to_run:
            for model_action in self._args.model_action:
                self._sub_benchmark_start_time = time.time()
                if model_action == ModelAction.TRAIN:
                    if not self.__train(precision):
                        self._result.set_return_code(ReturnCode.MODEL_TRAIN_FAILURE)
                        return False
                elif model_action == ModelAction.INFERENCE:
                    if not self.__inference(precision):
                        self._result.set_return_code(ReturnCode.MODEL_INFERENCE_FAILURE)
                        return False
                else:
                    logger.warning(
                        'Model action has no implementation yet - model: {}, model_action: {}'.format(
                            self._name, model_action
                        )
                    )

        return True

    def _is_finished(self, curr_step, curr_time):
        """Judge whether the benchmarking should stop.

        Args:
            curr_step (int): the current step index.
            curr_time (float): the current timestamp in seconds.

        Return:
            True if the configured duration has elapsed or all warmup and test steps have run.
        """
        total_steps = self._args.num_warmup + self._args.num_steps

        if (
            (self._args.duration > 0 and (curr_time - self._sub_benchmark_start_time) >= self._args.duration)
            or (total_steps > 0 and curr_step >= total_steps)
        ):
            return True

        return False

    def __process_model_result(self, model_action, precision, step_times):
        """Function to process raw results and save the summarized results.

        Args:
            model_action (ModelAction): train or inference.
            precision (Precision): precision of model and input data, such as float32, float16.
            step_times (list): The step time list of every training/inference step, unit is millisecond.
        """
        metric = 'steptime_{}_{}'.format(model_action.value, precision.value)
        self._result.add_raw_data(metric, step_times)
        avg = sum(step_times) / len(step_times)
        self._result.add_result(metric, avg)

        # The unit of step time is millisecond, use it to calculate the throughput with the unit samples/sec.
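        # For example, a 20 ms step with batch_size=32 gives 1000 / 20 * 32 = 1600 samples/sec.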
        millisecond_per_second = 1000
        throughput = [millisecond_per_second / step_time * self._args.batch_size for step_time in step_times]
        metric = 'throughput_{}_{}'.format(model_action.value, precision.value)
        self._result.add_raw_data(metric, throughput)
        avg = sum(throughput) / len(throughput)
        self._result.add_result(metric, avg)

    @abstractmethod
    def _cal_params_count(self):
        """Calculate the parameters scale of the model.

        Return:
            The count of trainable parameters.
        """
        pass

    def print_env_info(self):
        """Print environments or dependencies information."""
        # TODO: will implement it when adding real benchmarks in the future.
        pass
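
# A minimal sketch of how a concrete subclass could fill in the abstract hooks above,
# kept as a comment for reference. It is illustrative only: the class name and the
# trivial bodies are hypothetical, not part of SuperBench, and a real implementation
# would build framework-specific models, datasets, and optimizers.
#
#     class ToyModelBenchmark(ModelBenchmark):
#         def _judge_gpu_availability(self):
#             # Honor the --no_gpu flag; a real subclass would also query the framework.
#             self._gpu_available = not self._args.no_gpu
#
#         def _init_distributed_setting(self):
#             return True    # Single-worker run, nothing to initialize.
#
#         def _generate_dataset(self):
#             return True    # Populate self._dataset with self._args.sample_count samples.
#
#         def _init_dataloader(self):
#             return True    # Build self._dataloader over self._dataset.
#
#         def _create_model(self, precision):
#             return True    # Build self._model with the requested precision.
#
#         def _create_optimizer(self):
#             return True    # Build self._optimizer based on self._optimizer_type.
#
#         def _train_step(self, precision):
#             return [1.0] * self._args.num_steps    # Per-step times in milliseconds.
#
#         def _inference_step(self, precision):
#             return [1.0] * self._args.num_steps    # Per-step latencies in milliseconds.
#
#         def _cal_params_count(self):
#             return 0    # Count of trainable parameters.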