# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes Keras benchmarks and accuracy tests."""
from __future__ import print_function

import os
import time

from absl import flags
import tensorflow as tf  # pylint: disable=g-bad-import-order

from official.benchmark import keras_benchmark
from official.utils.testing import benchmark_wrappers
from official.vision.image_classification import resnet_imagenet_main

MIN_TOP_1_ACCURACY = 0.76
MAX_TOP_1_ACCURACY = 0.77

MOBILENET_V1_MIN_TOP_1_ACCURACY = 0.65
MOBILENET_V1_MAX_TOP_1_ACCURACY = 0.68

# Range of top-1 accuracies for model optimization techniques.
# Each item indicates (MIN_TOP_1_ACCURACY, MAX_TOP_1_ACCURACY).
MODEL_OPTIMIZATION_TOP_1_ACCURACY = {
    'RESNET50_FINETUNE_PRUNING': (0.76, 0.77),
    'MOBILENET_V1_FINETUNE_PRUNING': (0.67, 0.68),
}
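# A hedged usage sketch: the pruning accuracy benchmarks below unpack these
# ranges, e.g.
#   top_1_min, top_1_max = MODEL_OPTIMIZATION_TOP_1_ACCURACY[
#       'RESNET50_FINETUNE_PRUNING']  # -> (0.76, 0.77)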

FLAGS = flags.FLAGS


class Resnet50KerasAccuracy(keras_benchmark.KerasBenchmark):
  """Benchmark accuracy tests for ResNet50 in Keras."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    """A benchmark class.

    Args:
      output_dir: directory where output such as log files is written
      root_data_dir: directory under which to look for the dataset
      **kwargs: arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
    """

    flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]

    self.data_dir = os.path.join(root_data_dir, 'imagenet')
    super(Resnet50KerasAccuracy, self).__init__(
        output_dir=output_dir, flag_methods=flag_methods)

  def benchmark_graph_8_gpu(self):
    """Test Keras model with Keras fit/dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.dtype = 'fp32'
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with eager, dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.enable_eager = True
    # Add some thread tunings to improve performance.
    FLAGS.datasets_num_private_threads = 14
    self._run_and_report_benchmark()

  def benchmark_8_gpu_amp(self):
    """Test Keras model with eager, dist_strat and 8 GPUs with automatic mixed precision."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.fp16_implementation = 'graph_rewrite'
    # Add some thread tunings to improve performance.
    FLAGS.datasets_num_private_threads = 14
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with eager, dist_strat, 8 GPUs, and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_8_gpu_mlperf_like(self):
    """Test similar to the rules for MLPerf 0.5.

    Listed below are reasons this comparison does not match the MLPerf spec,
    but it is still a decent directional measurement:
      - Eval runs every 4 epochs and again at the end, ~2 extra times.
      - The learning rate is not tuned to hit 75%, but we know the model is
        correct.
      - We measure total time, while MLPerf 0.5 excluded some startup time.
      - Eval is not on the full set; eval batch_size must be set so that
        8 * batch_size divides 50K evenly. 250 is a good number.
      - Not sure if we are doing any extra or too few steps due to epoch bleed.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 61
    FLAGS.epochs_between_evals = 4
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mlperf_like')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    self._run_and_report_benchmark(top_1_min=0.736)

  def benchmark_xla_8_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs, dynamic fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.loss_scale = 'dynamic'
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark(top_1_min=0.736)

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self,
                                top_1_min=MIN_TOP_1_ACCURACY,
                                top_1_max=MAX_TOP_1_ACCURACY):
    start_time_sec = time.time()
    stats = resnet_imagenet_main.run(flags.FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(Resnet50KerasAccuracy, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=top_1_min,
        top_1_max=top_1_max,
        total_batch_size=FLAGS.batch_size,
        log_steps=100)

  def _get_model_dir(self, folder_name):
    return os.path.join(self.output_dir, folder_name)


class MobilenetV1KerasAccuracy(keras_benchmark.KerasBenchmark):
  """Benchmark accuracy tests for MobilenetV1 in Keras."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    """A benchmark class.

    Args:
      output_dir: directory where output such as log files is written
      root_data_dir: directory under which to look for the dataset
      **kwargs: arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
    """

    flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]

    self.data_dir = os.path.join(root_data_dir, 'imagenet')
    super(MobilenetV1KerasAccuracy, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags={
            'model': 'mobilenet',
            'optimizer': 'mobilenet_default',
            'initial_learning_rate_per_sample': 0.00039,
        })
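    # Note: per its name, 'initial_learning_rate_per_sample' is presumably
    # scaled by the global batch size, so the 8-GPU run below (batch size
    # 128 * 8 = 1024) would start at roughly 0.00039 * 1024 ~= 0.4.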

  def benchmark_8_gpu(self):
    """Test Keras model with eager, dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.enable_eager = True
    # Add some thread tunings to improve performance.
    FLAGS.datasets_num_private_threads = 14
    self._run_and_report_benchmark()

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self,
                                top_1_min=MOBILENET_V1_MIN_TOP_1_ACCURACY,
                                top_1_max=MOBILENET_V1_MAX_TOP_1_ACCURACY):
    start_time_sec = time.time()
    stats = resnet_imagenet_main.run(flags.FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(MobilenetV1KerasAccuracy, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=top_1_min,
        top_1_max=top_1_max,
        total_batch_size=FLAGS.batch_size,
        log_steps=100)

  def _get_model_dir(self, folder_name):
    return os.path.join(self.output_dir, folder_name)


class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
  """Resnet50 benchmarks."""

  def __init__(self, output_dir=None, default_flags=None, tpu=None):
    flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]

    super(Resnet50KerasBenchmarkBase, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=default_flags,
        tpu=tpu)

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self, skip_steps=None):
    start_time_sec = time.time()
    stats = resnet_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec
    # Number of logged step-time entries to exclude from the performance
    # report. We keep results from the last 100 batches, or skip steps based
    # on the input skip_steps.
    warmup = (skip_steps or (FLAGS.train_steps - 100)) // FLAGS.log_steps
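    # For example, with the synthetic/real benchmark defaults (train_steps=110,
    # log_steps=10): warmup = (110 - 100) // 10 = 1, so the first logged entry
    # is dropped and the final 100 steps are measured.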

    super(Resnet50KerasBenchmarkBase, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps,
        warmup=warmup,
        start_time_sec=start_time_sec)

  def benchmark_1_gpu_no_dist_strat(self):
    """Test Keras model with 1 GPU, no distribution strategy."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
    """Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly')
    FLAGS.batch_size = 64
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked(self):
    """Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.explicit_gpu_placement = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked')
    FLAGS.batch_size = 64
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self):
    """Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked(self):
    """Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.explicit_gpu_placement = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_no_dist_strat(self):
    """Test Keras model in legacy graph mode with 1 GPU, no dist strat."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat')
    FLAGS.batch_size = 96  # BatchNorm is less efficient in legacy graph mode
    # due to its reliance on v1 cond.
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test Keras model with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_amp(self):
    """Test Keras model with 1 GPU with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_amp')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu(self):
    """Test Keras model with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_amp(self):
    """Test Keras model with XLA and 1 GPU with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_amp')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16(self):
    """Test Keras model with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16_dynamic(self):
    """Test Keras model with 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16(self):
    """Test Keras model with XLA, 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test Keras model in legacy graph mode with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16 and XLA."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph with 1 GPU, fp16, XLA, and tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'one_device'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_amp(self):
    """Test Keras model with 8 GPUs with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    FLAGS.datasets_num_private_threads = 14
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu(self):
    """Test Keras model with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_amp(self):
    """Test Keras model with XLA and 8 GPUs with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_amp')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning, 8 GPUs, and XLA."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 24
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, dynamic loss scaling, and tuned."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model with manual config tuning, XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
    """Test with manual config tuning, XLA, 8 GPUs and fp16.

    Delay performance measurement for stable performance on 96 vCPU platforms.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tweaked_delay_measure')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.train_steps = 310
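    # Extending the run to 310 steps keeps the measured window (the last 100
    # steps; see _run_and_report_benchmark) clear of the slow startup phase.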
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test Keras model in legacy graph mode with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode, tuning, 8 GPUs, and FP16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode with tuning, XLA, 8 GPUs, fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure(self):
    """Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.

    Delay performance measurement for stable performance on 96 vCPU platforms.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.train_steps = 310
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'mirrored'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_2x2_tpu_fp16(self):
    """Test Keras model with 2x2 TPU, fp16."""
    self._setup()

    FLAGS.dtype = 'bf16'
    FLAGS.distribution_strategy = 'tpu'
    FLAGS.model_dir = self._get_model_dir('benchmark_2x2_tpu_fp16')
    FLAGS.batch_size = 1024
    self._run_and_report_benchmark()

  def benchmark_4x4_tpu_fp16(self):
    """Test Keras model with 4x4 TPU, fp16."""
    self._setup()

    FLAGS.dtype = 'bf16'
    FLAGS.distribution_strategy = 'tpu'
    FLAGS.model_dir = self._get_model_dir('benchmark_4x4_tpu_fp16')
    FLAGS.batch_size = 4096
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(Resnet50KerasBenchmarkBase, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


class Resnet50KerasBenchmarkSynth(Resnet50KerasBenchmarkBase):
  """Resnet50 synthetic benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, tpu=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['use_synthetic_data'] = True
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkSynth, self).__init__(
        output_dir=output_dir, default_flags=def_flags, tpu=tpu)


class Resnet50KerasBenchmarkReal(Resnet50KerasBenchmarkBase):
  """Resnet50 real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, tpu=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkReal, self).__init__(
        output_dir=output_dir, default_flags=def_flags, tpu=tpu)


class Resnet50KerasBenchmarkRemoteData(Resnet50KerasBenchmarkBase):
  """Resnet50 real data (stored in remote storage) benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    # Defining multiple epochs overrides the train_steps setting in benchmarks.
    def_flags['train_epochs'] = 2
    # Cache dataset so performance is stable after the first epoch.
    def_flags['training_dataset_cache'] = True
    def_flags['log_steps'] = 100

    super(Resnet50KerasBenchmarkRemoteData, self).__init__(
        output_dir=output_dir, default_flags=def_flags)

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self):
    # Skip the first epoch for performance measurement.
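    # (With log_steps=100, skip_steps=600 drops the first six logged entries;
    # at a global batch of e.g. 256 * 8 = 2048, ImageNet's ~1.28M training
    # images take about 625 steps per epoch, so this is roughly one epoch.)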
    super(Resnet50KerasBenchmarkRemoteData,
          self)._run_and_report_benchmark(skip_steps=600)


class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
  """Trivial model with real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]

    def_flags = {}
    def_flags['use_trivial_model'] = True
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['dtype'] = 'fp16'
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 600
    def_flags['log_steps'] = 100
    def_flags['distribution_strategy'] = 'mirrored'

    super(TrivialKerasBenchmarkReal, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=def_flags)

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = resnet_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(TrivialKerasBenchmarkReal, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)

  def benchmark_8_gpu_warmup(self):
    """Dummy test that runs over an epoch to warmup the machine."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_warmup')
    FLAGS.batch_size = 256 * 8
    FLAGS.train_steps = 700
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test trivial Keras model (input pipeline) with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test trivial Keras model (input pipeline) with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test trivial Keras model (input pipeline) with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test trivial Keras model with tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test trivial Keras model in legacy graph mode with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_tweaked(self):
    """Test trivial Keras model in legacy graph mode with tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(TrivialKerasBenchmarkReal, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


class Resnet50MultiWorkerKerasAccuracy(keras_benchmark.KerasBenchmark):
  """Resnet50 distributed accuracy tests with multiple workers."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]
    self.data_dir = os.path.join(root_data_dir, 'imagenet')
    super(Resnet50MultiWorkerKerasAccuracy, self).__init__(
        output_dir=output_dir, flag_methods=flag_methods)

  def _benchmark_common(self, eager, num_workers, all_reduce_alg):
    """Common to all benchmarks in this class."""
    self._setup()

    num_gpus = 8
    FLAGS.num_gpus = num_gpus
    FLAGS.data_dir = self.data_dir
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = eager
    FLAGS.enable_xla = False
    FLAGS.distribution_strategy = 'multi_worker_mirrored'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 32
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_{}_8_gpu_{}_worker_fp16_{}_tweaked'.format(
            'eager' if eager else 'graph', num_workers, all_reduce_alg))
    FLAGS.batch_size = 256 * num_gpus * num_workers
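    # The global batch size scales linearly with the number of workers, e.g.
    # 2 workers * 8 GPUs * 256 = 4096 images per step.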
    FLAGS.all_reduce_alg = all_reduce_alg

    self._run_and_report_benchmark()

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self,
                                top_1_min=MIN_TOP_1_ACCURACY,
                                top_1_max=MAX_TOP_1_ACCURACY):
    start_time_sec = time.time()
    stats = resnet_imagenet_main.run(flags.FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(Resnet50MultiWorkerKerasAccuracy, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=top_1_min,
        top_1_max=top_1_max,
        total_batch_size=FLAGS.batch_size,
        log_steps=100)

  def _get_model_dir(self, folder_name):
    return os.path.join(self.output_dir, folder_name)

  def benchmark_eager_8_gpu_2_workers_fp16_ring_tweaked(self):
    """Eager, 8 GPUs per worker, 2 workers, fp16, ring all-reduce."""
    self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='ring')

  def benchmark_eager_8_gpu_2_workers_fp16_nccl_tweaked(self):
    """Eager, 8 GPUs per worker, 2 workers, fp16, nccl all-reduce."""
    self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='nccl')

  def benchmark_eager_8_gpu_8_workers_fp16_ring_tweaked(self):
    """Eager, 8 GPUs per worker, 8 workers, fp16, ring all-reduce."""
    self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='ring')

  def benchmark_eager_8_gpu_8_workers_fp16_nccl_tweaked(self):
    """Eager, 8 GPUs per worker, 8 workers, fp16, nccl all-reduce."""
    self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='nccl')


class Resnet50MultiWorkerKerasBenchmark(Resnet50KerasBenchmarkBase):
  """Resnet50 distributed benchmark tests with multiple workers."""

  def __init__(self, output_dir=None, default_flags=None):
    super(Resnet50MultiWorkerKerasBenchmark, self).__init__(
        output_dir=output_dir, default_flags=default_flags)

  def _benchmark_common(self, eager, num_workers, all_reduce_alg):
    """Common to all benchmarks in this class."""
    self._setup()

    num_gpus = 8
    FLAGS.num_gpus = num_gpus
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = eager
    FLAGS.enable_xla = False
    FLAGS.distribution_strategy = 'multi_worker_mirrored'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 32
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_{}_8_gpu_{}_worker_fp16_{}_tweaked'.format(
            'eager' if eager else 'graph', num_workers, all_reduce_alg))
    FLAGS.batch_size = 256 * num_gpus * num_workers
    FLAGS.all_reduce_alg = all_reduce_alg

    self._run_and_report_benchmark()

  def benchmark_eager_8_gpu_1_worker_fp16_ring_tweaked(self):
    """Eager, 8 GPUs per worker, 1 worker, fp16, ring all-reduce."""
    self._benchmark_common(eager=True, num_workers=1, all_reduce_alg='ring')

  def benchmark_eager_8_gpu_1_worker_fp16_nccl_tweaked(self):
    """Eager, 8 GPUs per worker, 1 worker, fp16, nccl all-reduce."""
    self._benchmark_common(eager=True, num_workers=1, all_reduce_alg='nccl')

  def benchmark_eager_8_gpu_2_workers_fp16_ring_tweaked(self):
    """Eager, 8 GPUs per worker, 2 workers, fp16, ring all-reduce."""
    self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='ring')

  def benchmark_eager_8_gpu_2_workers_fp16_nccl_tweaked(self):
    """Eager, 8 GPUs per worker, 2 workers, fp16, nccl all-reduce."""
    self._benchmark_common(eager=True, num_workers=2, all_reduce_alg='nccl')

  def benchmark_eager_8_gpu_8_workers_fp16_ring_tweaked(self):
    """Eager, 8 GPUs per worker, 8 workers, fp16, ring all-reduce."""
    self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='ring')

  def benchmark_eager_8_gpu_8_workers_fp16_nccl_tweaked(self):
    """Eager, 8 GPUs per worker, 8 workers, fp16, nccl all-reduce."""
    self._benchmark_common(eager=True, num_workers=8, all_reduce_alg='nccl')


class Resnet50MultiWorkerKerasBenchmarkSynth(Resnet50MultiWorkerKerasBenchmark):
  """Resnet50 multi-worker synthetic data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['use_synthetic_data'] = True
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50MultiWorkerKerasBenchmarkSynth, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class Resnet50MultiWorkerKerasBenchmarkReal(Resnet50MultiWorkerKerasBenchmark):
  """Resnet50 multi-worker real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50MultiWorkerKerasBenchmarkReal, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


# TODO(kimjaehong): It should also cover other methods of model optimization
# techniques. At that point, this class will be renamed to something like
# 'KerasModelOptimizationAccuracyBase'.
class KerasPruningAccuracyBase(keras_benchmark.KerasBenchmark):
  """Benchmark accuracy tests for pruning method."""

  def __init__(self,
               output_dir=None,
               root_data_dir=None,
               default_flags=None,
               **kwargs):
    """A accuracy benchmark class for pruning method.

    Args:
      output_dir: directory where to output e.g. log files
      root_data_dir: directory under which to look for dataset
      default_flags: default flags
      **kwargs: arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
    """
    if default_flags is None:
      default_flags = {}
    default_flags['pruning_method'] = 'polynomial_decay'
    default_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')

    flag_methods = [resnet_imagenet_main.define_imagenet_keras_flags]

    super(KerasPruningAccuracyBase, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=default_flags,
        **kwargs)

  def benchmark_8_gpu(self):
    """Test Keras model with eager, dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.batch_size = 32 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.enable_eager = True
    self._run_and_report_benchmark()

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self,
                                top_1_min=MODEL_OPTIMIZATION_TOP_1_ACCURACY[
                                    'RESNET50_FINETUNE_PRUNING'][0],
                                top_1_max=MODEL_OPTIMIZATION_TOP_1_ACCURACY[
                                    'RESNET50_FINETUNE_PRUNING'][1]):
    start_time_sec = time.time()
    stats = resnet_imagenet_main.run(flags.FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(KerasPruningAccuracyBase, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=top_1_min,
        top_1_max=top_1_max,
        total_batch_size=FLAGS.batch_size,
        log_steps=100)


class MobilenetV1KerasPruningAccuracy(KerasPruningAccuracyBase):
  """Benchmark accuracy tests for MobilenetV1 with pruning method."""

  def __init__(self, root_data_dir=None, **kwargs):
    default_flags = {
        'model': 'mobilenet',
        'optimizer': 'mobilenet_default',
        'initial_learning_rate_per_sample': 0.00007,
        'pretrained_filepath': tf.train.latest_checkpoint(
            os.path.join(root_data_dir, 'mobilenet_v1')),
        'pruning_begin_step': 0,
        'pruning_end_step': 100000,
        'pruning_initial_sparsity': 0.0,
        'pruning_final_sparsity': 0.5,
        'pruning_frequency': 100,
    }
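    # Under the polynomial-decay schedule these flags describe, sparsity ramps
    # from 0% to 50% between steps 0 and 100,000, re-pruning every 100 steps.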
    super(MobilenetV1KerasPruningAccuracy, self).__init__(
        root_data_dir=root_data_dir,
        default_flags=default_flags,
        **kwargs)

  def _run_and_report_benchmark(self):
    super(MobilenetV1KerasPruningAccuracy, self)._run_and_report_benchmark(
        top_1_min=\
        MODEL_OPTIMIZATION_TOP_1_ACCURACY['MOBILENET_V1_FINETUNE_PRUNING'][0],
        top_1_max=\
        MODEL_OPTIMIZATION_TOP_1_ACCURACY['MOBILENET_V1_FINETUNE_PRUNING'][1])


class Resnet50KerasPruningAccuracy(KerasPruningAccuracyBase):
  """Benchmark accuracy tests for resnet50 with pruning method."""

  def __init__(self, root_data_dir=None, **kwargs):
    default_flags = {
        'model': 'resnet50_v1.5',
        'optimizer': 'mobilenet_default',
        'initial_learning_rate_per_sample': 0.0000039,
        'pretrained_filepath': tf.train.latest_checkpoint(
            os.path.join(root_data_dir, 'resnet50')),
        'pruning_begin_step': 0,
        'pruning_end_step': 50000,
        'pruning_initial_sparsity': 0.0,
        'pruning_final_sparsity': 0.5,
        'pruning_frequency': 100,
    }
    super(Resnet50KerasPruningAccuracy, self).__init__(
        root_data_dir=root_data_dir,
        default_flags=default_flags,
        **kwargs)

  def _run_and_report_benchmark(self):
    super(Resnet50KerasPruningAccuracy, self)._run_and_report_benchmark(
        top_1_min=\
        MODEL_OPTIMIZATION_TOP_1_ACCURACY['RESNET50_FINETUNE_PRUNING'][0],
        top_1_max=\
        MODEL_OPTIMIZATION_TOP_1_ACCURACY['RESNET50_FINETUNE_PRUNING'][1])


class KerasPruningBenchmarkRealBase(Resnet50KerasBenchmarkBase):
  """Pruning method benchmarks."""

  def __init__(self, root_data_dir=None, default_flags=None, **kwargs):
    if default_flags is None:
      default_flags = {}
    default_flags.update({
        'skip_eval': True,
        'report_accuracy_metrics': False,
        'data_dir': os.path.join(root_data_dir, 'imagenet'),
        'train_steps': 110,
        'log_steps': 10,
        'pruning_method': 'polynomial_decay',
        'pruning_begin_step': 0,
        'pruning_end_step': 50000,
        'pruning_initial_sparsity': 0,
        'pruning_final_sparsity': 0.5,
        'pruning_frequency': 100,
    })
    super(KerasPruningBenchmarkRealBase, self).__init__(
        default_flags=default_flags, **kwargs)


class MobilenetV1KerasPruningBenchmarkReal(KerasPruningBenchmarkRealBase):
  """Pruning method benchmarks for MobilenetV1."""

  def __init__(self, **kwargs):
    default_flags = {
        'model': 'mobilenet',
        'optimizer': 'mobilenet_default',
    }
    super(MobilenetV1KerasPruningBenchmarkReal, self).__init__(
        default_flags=default_flags, **kwargs)


class Resnet50KerasPruningBenchmarkReal(KerasPruningBenchmarkRealBase):
  """Pruning method benchmarks for resnet50."""

  def __init__(self, **kwargs):
    default_flags = {
        'model': 'resnet50_v1.5',
        'optimizer': 'mobilenet_default',
    }
    super(Resnet50KerasPruningBenchmarkReal, self).__init__(
        default_flags=default_flags, **kwargs)


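# A hypothetical invocation via TensorFlow's benchmark harness (in practice
# PerfZero usually selects and runs the benchmark methods directly):
#   python keras_imagenet_benchmark.py \
#     --benchmarks=Resnet50KerasBenchmarkSynth.benchmark_1_gpu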
if __name__ == '__main__':
  tf.test.main()