# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes Keras benchmarks and accuracy tests."""
from __future__ import print_function

import os
import time

from absl import flags
import tensorflow as tf  # pylint: disable=g-bad-import-order

from official.resnet import imagenet_main
from official.resnet.keras import keras_benchmark
from official.resnet.keras import keras_common
from official.resnet.keras import keras_imagenet_main

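# Expected top-1 accuracy bounds for ResNet50 trained on ImageNet for 90
# epochs; passed to _report_benchmark to validate accuracy runs.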
MIN_TOP_1_ACCURACY = 0.76
MAX_TOP_1_ACCURACY = 0.77

FLAGS = flags.FLAGS


class Resnet50KerasAccuracy(keras_benchmark.KerasBenchmark):
  """Benchmark accuracy tests for ResNet50 in Keras."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    """A benchmark class.

    Args:
      output_dir: directory where outputs such as log files are written
      root_data_dir: directory under which to look for the dataset
      **kwargs: arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
    """

    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]

    self.data_dir = os.path.join(root_data_dir, 'imagenet')
    super(Resnet50KerasAccuracy, self).__init__(
        output_dir=output_dir, flag_methods=flag_methods)

  def benchmark_graph_8_gpu(self):
    """Test Keras model with Keras fit/dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with eager, dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.enable_eager = True
    # Add some thread tunings to improve performance.
    FLAGS.datasets_num_private_threads = 14
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with eager, dist_strat, 8 GPUs, and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_mlperf_like_tweaked(self):
    """Test similar to the rules for MLPerf 0.5.

    Listed below are reasons this comparison does not follow the MLPerf spec
    exactly, but it is still a decent directional measurement:
      - Eval runs every 4 epochs and again at the end, roughly 2 extra times.
      - The learning rate is not tuned to hit 75%, but we know the model is
        correct.
      - We measure total time, while MLPerf 0.5 excluded some startup time.
      - Eval is not on the full validation set; eval batch_size needs to be
        set so that 50K divides evenly by 8 * batch_size. 250 is a good
        number.
      - We may be running slightly too many or too few steps due to epoch
        bleed.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 61
    FLAGS.epochs_between_evals = 4
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mlperf_like_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
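    # Relative to benchmark_8_gpu_mlperf_like below, this "tweaked" variant
    # also enables the tensor LR schedule and GPU-private thread tuning.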
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_8_gpu_mlperf_like(self):
    """Test similar to the rules for MLPerf 0.5.

    Listed below are reasons this comparison does not follow the MLPerf spec
    exactly, but it is still a decent directional measurement:
      - Eval runs every 4 epochs and again at the end, roughly 2 extra times.
      - The learning rate is not tuned to hit 75%, but we know the model is
        correct.
      - We measure total time, while MLPerf 0.5 excluded some startup time.
      - Eval is not on the full validation set; eval batch_size needs to be
        set so that 50K divides evenly by 8 * batch_size. 250 is a good
        number.
      - We may be running slightly too many or too few steps due to epoch
        bleed.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 61
    FLAGS.epochs_between_evals = 4
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mlperf_like')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs, dynamic fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.loss_scale = 'dynamic'
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

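  # Times a full training run and forwards the stats, together with the
  # expected top-1 accuracy bounds, to _report_benchmark.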
  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(flags.FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(Resnet50KerasAccuracy, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=MIN_TOP_1_ACCURACY,
        top_1_max=MAX_TOP_1_ACCURACY,
        total_batch_size=FLAGS.batch_size,
        log_steps=100)

  def _get_model_dir(self, folder_name):
    return os.path.join(self.output_dir, folder_name)


class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
  """Resnet50 benchmarks."""

  def __init__(self, output_dir=None, default_flags=None):
    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]

    super(Resnet50KerasBenchmarkBase, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=default_flags)

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec
    # Number of logged step time entries to exclude from the performance
    # report. Here we keep results from the last 100 batches.
    warmup = (FLAGS.train_steps - 100) // FLAGS.log_steps
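    # E.g. with the throughput defaults below (train_steps=110, log_steps=10),
    # warmup is 1: the first logged entry is dropped and the rest cover the
    # final 100 batches.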

    super(Resnet50KerasBenchmarkBase, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps,
        warmup=warmup)

  def benchmark_1_gpu_no_dist_strat(self):
    """Test Keras model with 1 GPU, no distribution strategy."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
    """Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly')
    FLAGS.batch_size = 64
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self):
    """Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_no_dist_strat(self):
    """Test Keras model in legacy graph mode with 1 GPU, no dist strat."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat')
    FLAGS.batch_size = 96  # BatchNorm is less efficient in legacy graph mode
                           # due to its reliance on v1 cond.
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test Keras model with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_layout_off(self):
    """Test Keras model with 1 GPU and no layout optimizer."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_layout_off')
    FLAGS.batch_size = 128
    FLAGS.enable_grappler_layout_optimizer = False
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu(self):
    """Test Keras model with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_layout_off(self):
    """Test Keras model with 1 GPU and xla w/no layout optimizer."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_layout_off')
    FLAGS.batch_size = 128
    FLAGS.enable_grappler_layout_optimizer = False
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16(self):
    """Test Keras model with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16_layout_off(self):
    """Test Keras model with 1 GPU and FP16 w/no layout optimizer."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_layout_off')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
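    # Grappler's layout optimizer normally rewrites NHWC ops to NCHW on GPUs;
    # disabling it while feeding channels_last data measures the unoptimized
    # layout path.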
    FLAGS.enable_grappler_layout_optimizer = False
    FLAGS.data_format = 'channels_last'
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16_dynamic(self):
    """Test Keras model with 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16(self):
    """Test Keras model with XLA, 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_layout_off(self):
    """Test Keras model with FP16+XLA w/no layout optimizer."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_layout_off')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.enable_grappler_layout_optimizer = False
    FLAGS.data_format = 'channels_last'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_slack(self):
    """Test Keras model tf.data's experimental_slack functionality."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_slack')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test Keras model in legacy graph mode with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16 and XLA."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph with 1 GPU, fp16, XLA, and tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_slack(self):
    """Test model in legacy graph with tf.data's experimental_slack."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_slack')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_cloning(self):
    """Test Keras model with 8 GPUs and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_cloning')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.datasets_num_private_threads = 14
    self._run_and_report_benchmark()

  def benchmark_8_gpu_slack(self):
    """Test Keras model with tf.data's experimental_slack and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_slack')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu(self):
    """Test Keras model with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning, 8 GPUs, and XLA."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 24
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_layout_off(self):
    """Test Keras model with 8 GPUs, fp16, and layout off."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_layout_off')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.enable_grappler_layout_optimizer = False
    FLAGS.data_format = 'channels_last'
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_cloning(self):
    """Test Keras model with 8 GPUs, fp16 and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_cloning')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_tweaked_layout_off(self):
    """Test Keras model with 8 GPUs, fp16,tuning, and layout off."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_8_gpu_fp16_tweaked_layout_off')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    FLAGS.enable_grappler_layout_optimizer = False
    FLAGS.data_format = 'channels_last'
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_cloning_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, cloning, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_8_gpu_fp16_cloning_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, dynamic loss scaling, and tuned."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_optional_next(self):
    """Test Keras model with XLA, 8 GPUs and fp16.

    This test also enables get_next_as_optional.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_optional_next')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.enable_get_next_as_optional = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_layout_off(self):
    """Test Keras model with XLA, 8 GPUs, fp16, and layout off."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_layout_off')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.enable_grappler_layout_optimizer = False
    FLAGS.data_format = 'channels_last'
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_cloning(self):
    """Test Keras model with XLA, 8 GPUs, fp16 and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_cloning')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model with manual config tuning, XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_cloning_tweaked(self):
    """Test with manual config tuning, XLA, 8 GPUs, fp16, and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_cloning_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    # FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_cloning_tweaked_layout_off(self):
    """Test with tuning, FP16+XLA, cloning, and layout_off."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_cloning_tweaked_layout_off')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.enable_grappler_layout_optimizer = False
    FLAGS.data_format = 'channels_last'
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_cloning_tweaked_optional_next(self):
    """Test with manual config tuning, XLA, 8 GPUs, fp16, and cloning.

    This test also enables get_next_as_optional.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_cloning_tweaked_optional_next')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    # FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    FLAGS.enable_get_next_as_optional = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
    """Test with manual config tuning, XLA, 8 GPUs and fp16.

    Delay performance measurement so numbers are stable on 96 vCPU platforms.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tweaked_delay_measure')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.train_steps = 310
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_cloning_tweaked_delay_measure(self):
    """Test with manual config tuning, XLA, 8 GPUs, fp16, and cloning.

    Delay performance measurement so numbers are stable on 96 vCPU platforms.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_cloning_tweaked_delay_measure')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    FLAGS.train_steps = 310
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked_optional_next(self):
    """Test Keras model with manual config tuning, XLA, 8 GPUs, fp16.

    This test also enables get_next_as_optional.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tweaked_optional_next')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    FLAGS.enable_get_next_as_optional = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_slack(self):
    """Test Keras model with XLA, 8 GPUs and fp16.

    This test also enables tf.data's experimental_slack functionality.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_slack')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tensorboard_tweaked(self):
    """Test to track Tensorboard performance overhead."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tensorboard_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    FLAGS.enable_tensorboard = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test Keras model in legacy graph mode with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode, tuning, 8 GPUs, and FP16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph tuning, XLA_FP16, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure(self):
    """Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.

    Delay performance measurement so numbers are stable on 96 vCPU platforms.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.train_steps = 310
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked_optional_next(self):
    """Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.

    This test also enables get_next_as_optional.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked_optional_next')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.enable_get_next_as_optional = True
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_slack(self):
    """Test legacy graph mode with tf.data's experimental_slack."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_slack')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(Resnet50KerasBenchmarkBase, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


class Resnet50KerasBenchmarkSynth(Resnet50KerasBenchmarkBase):
  """Resnet50 synthetic benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
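    # Synthetic data takes the input pipeline out of the measurement, and
    # accuracy is meaningless on synthetic inputs, so eval is skipped.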
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['use_synthetic_data'] = True
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkSynth, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class Resnet50KerasBenchmarkReal(Resnet50KerasBenchmarkBase):
  """Resnet50 real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkReal, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
  """Trivial model with real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]
    def_flags = {}
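    # The trivial model does almost no compute, so these benchmarks
    # effectively measure real-data input pipeline throughput.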
    def_flags['use_trivial_model'] = True
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['use_tensor_lr'] = True
    def_flags['dtype'] = 'fp16'
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 600
    def_flags['log_steps'] = 100
    def_flags['distribution_strategy'] = 'default'

    super(TrivialKerasBenchmarkReal, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=def_flags)

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(TrivialKerasBenchmarkReal, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)

  def benchmark_8_gpu_warmup(self):
    """Dummy test that runs over an epoch to warmup the machine."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_warmup')
    FLAGS.batch_size = 256 * 8
    FLAGS.train_steps = 700
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test trivial Keras model (input pipeline) with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test trivial Keras model (input pipeline) with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test trivial Keras model (input pipeline) with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test trivial Keras model with tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test trivial Keras model in legacy graph mode with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_tweaked(self):
    """Test trivial Keras model in legacy graph mode with tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(TrivialKerasBenchmarkReal, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


if __name__ == '__main__':
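  # benchmark_* methods are typically selected and invoked by name through a
  # harness such as PerfZero; tf.test.main() handles direct invocation.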
  tf.test.main()