# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes Keras benchmarks and accuracy tests."""
from __future__ import print_function

import os
import time

from absl import flags
import tensorflow as tf  # pylint: disable=g-bad-import-order

from official.resnet.keras import keras_benchmark
from official.resnet.keras import keras_imagenet_main

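# Expected top-1 accuracy bounds for a full ResNet50 ImageNet training run.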
MIN_TOP_1_ACCURACY = 0.76
MAX_TOP_1_ACCURACY = 0.77

FLAGS = flags.FLAGS


class Resnet50KerasAccuracy(keras_benchmark.KerasBenchmark):
  """Benchmark accuracy tests for ResNet50 in Keras."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    """A benchmark class.

    Args:
      output_dir: directory where to output e.g. log files
      root_data_dir: directory under which to look for dataset
      **kwargs: arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
    """

    flag_methods = [keras_imagenet_main.define_imagenet_keras_flags]

    self.data_dir = os.path.join(root_data_dir, 'imagenet')
    super(Resnet50KerasAccuracy, self).__init__(
        output_dir=output_dir, flag_methods=flag_methods)

  def benchmark_graph_8_gpu(self):
    """Test Keras model with Keras fit/dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with eager, dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.enable_eager = True
    # Add some thread tunings to improve performance.
    FLAGS.datasets_num_private_threads = 14
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_amp(self):
    """Test Keras model with eager, dist_strat and 8 GPUs with automatic mixed precision."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp')
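    # dtype stays fp32 here; the automatic mixed precision graph rewrite
    # inserts fp16 casts where they help.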
    FLAGS.dtype = 'fp32'
    FLAGS.enable_eager = True
    FLAGS.automatic_mixed_precision = True
    # Add some thread tunings to improve performance.
    FLAGS.datasets_num_private_threads = 14
    FLAGS.use_tensor_lr = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with eager, dist_strat, 8 GPUs, and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
106
    FLAGS.epochs_between_evals = 10
Reed's avatar
Reed committed
107
108
109
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
110
111
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
112
    FLAGS.use_tensor_lr = True
Reed's avatar
Reed committed
113
114
115
116
117
118
119
120
121
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
122
    FLAGS.epochs_between_evals = 10
Reed's avatar
Reed committed
123
124
125
126
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
127
128
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
129
    FLAGS.use_tensor_lr = True
Reed's avatar
Reed committed
130
131
    self._run_and_report_benchmark()

  def benchmark_8_gpu_mlperf_like_tweaked(self):
    """Test similar to the rules for MLPerf 0.5.

    Listed below are reasons this comparison is not to the MLSpec, but this is
    still a decent directional measurement:
      - Eval is every 4 epochs and again at the end. ~2 extra times.
      - Learning rate is not tuned to hit 75%, but we know the model is correct.
      - We measure total time and MLPerf 0.5 excluded some startup time.
      - Eval is not on the total set, need to set eval batch_size where
        8*batch_size/50K is even. 250 is a good number.
      - Not sure if we are doing any extra or too few steps due to epoch bleed.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 61
    FLAGS.epochs_between_evals = 4
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mlperf_like_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark(top_1_min=0.736)

  def benchmark_8_gpu_mlperf_like(self):
    """Test similar to the rules for MLPerf 0.5.

    Listed below are reasons this comparison is not to the MLSpec, but this is
    still a decent directional measurement:
      - Eval is every 4 epochs and again at the end. ~2 extra times.
      - Learning rate is not tuned to hit 75%, but we know the model is correct.
      - We measure total time and MLPerf 0.5 excluded some startup time.
      - Eval is not on the total set, need to set eval batch_size where
        8*batch_size/50K is even. 250 is a good number.
      - Not sure if we are doing any extra or too few steps due to epoch bleed.
    """
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 61
    FLAGS.epochs_between_evals = 4
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_mlperf_like')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    self._run_and_report_benchmark(top_1_min=0.736)

  def benchmark_xla_8_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs, dynamic fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
189
    FLAGS.epochs_between_evals = 10
190
191
192
193
194
195
196
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.loss_scale = 'dynamic'
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
197
    FLAGS.use_tensor_lr = True
198
    self._run_and_report_benchmark(top_1_min=0.736)

  def _run_and_report_benchmark(self,
                                top_1_min=MIN_TOP_1_ACCURACY,
                                top_1_max=MAX_TOP_1_ACCURACY):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(flags.FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(Resnet50KerasAccuracy, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=top_1_min,
        top_1_max=top_1_max,
        total_batch_size=FLAGS.batch_size,
        log_steps=100)

  def _get_model_dir(self, folder_name):
    return os.path.join(self.output_dir, folder_name)


class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
  """Resnet50 benchmarks."""

  def __init__(self, output_dir=None, default_flags=None):
    flag_methods = [keras_imagenet_main.define_imagenet_keras_flags]

    super(Resnet50KerasBenchmarkBase, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=default_flags)

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec
    # Number of logged step time entries to exclude from the performance
    # report. We keep results from the last 100 batches here.
    warmup = (FLAGS.train_steps - 100) // FLAGS.log_steps
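    # Example: with the synthetic defaults (train_steps=110, log_steps=10),
    # warmup = (110 - 100) // 10 = 1, so only the first logged entry is
    # dropped.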

    super(Resnet50KerasBenchmarkBase, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps,
        warmup=warmup)

  def benchmark_1_gpu_no_dist_strat(self):
    """Test Keras model with 1 GPU, no distribution strategy."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_tweaked(self):
    """Test with 1 GPU, no distribution strategy, and manual tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.explicit_gpu_placement = True
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.set_learning_phase_to_train = False
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_tweaked')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly(self):
    """Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly')
    FLAGS.batch_size = 64
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked(self):
    """Test Keras model with 1 GPU, no distribution strategy, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.explicit_gpu_placement = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_tweaked')
    FLAGS.batch_size = 64
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_force_v1_path_run_eagerly(self):
    """Forced v1 execution in tf.compile path and force eager."""
299
300
301
302
303
304
305
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
306
        'benchmark_1_gpu_no_dist_strat_force_v1_path_run_eagerly')
307
    FLAGS.batch_size = 64
308
    FLAGS.force_v2_in_keras_compile = False
309
310
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_force_v1_path_run_eagerly_tweaked(self):
    """Forced v1 execution in tf.compile path and force eager."""
313
314
315
316
317
318
319
320
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.explicit_gpu_placement = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
321
        'benchmark_1_gpu_no_dist_strat_force_v1_path_run_eagerly_tweaked')
322
    FLAGS.batch_size = 64
323
    FLAGS.force_v2_in_keras_compile = False
324
325
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_force_v1_path(self):
    """No dist strat but forced v1 execution tf.compile path."""
328
329
330
331
332
333
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
334
        'benchmark_1_gpu_no_dist_strat_force_v1_path')
335
    FLAGS.batch_size = 128
336
    FLAGS.force_v2_in_keras_compile = False
337
338
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16(self):
    """Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked(self):
    """Test with 1 GPU, no distribution strategy, fp16, run eagerly."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.run_eagerly = True
    FLAGS.explicit_gpu_placement = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_1_gpu_no_dist_strat_run_eagerly_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_no_dist_strat(self):
    """Test Keras model in legacy graph mode with 1 GPU, no dist strat."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat')
    FLAGS.batch_size = 96  # BatchNorm is less efficient in legacy graph mode
                           # due to its reliance on v1 cond.
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test Keras model with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_amp(self):
    """Test Keras model with 1 GPU with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.automatic_mixed_precision = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_amp')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu(self):
    """Test Keras model with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_amp(self):
    """Test Keras model with XLA and 1 GPU with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.automatic_mixed_precision = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_amp')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16(self):
    """Test Keras model with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16_dynamic(self):
    """Test Keras model with 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
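    # Dynamic loss scaling adjusts the scale during training to avoid fp16
    # gradient underflow, instead of relying on a fixed scale.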
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16(self):
    """Test Keras model with XLA, 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_slack(self):
    """Test Keras model with tf.data's experimental_slack functionality."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_slack')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
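    # experimental_slack introduces slack in the input pipeline's final
    # prefetch, which can reduce CPU contention at the start of a step.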
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

Toby Boyd's avatar
Toby Boyd committed
509
  def benchmark_graph_1_gpu(self):
    """Test Keras model in legacy graph mode with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16 and XLA."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph with 1 GPU, fp16, XLA, and tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_slack(self):
    """Test model in legacy graph with tf.data's experimental_slack."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_slack')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_amp(self):
    """Test Keras model with 8 GPUs with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.automatic_mixed_precision = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_amp')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.datasets_num_private_threads = 14
    self._run_and_report_benchmark()

  def benchmark_8_gpu_slack(self):
    """Test Keras model with tf.data's experimental_slack and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_slack')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu(self):
    """Test Keras model with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_amp(self):
    """Test Keras model with XLA and 8 GPUs with automatic mixed precision."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.automatic_mixed_precision = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_amp')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning, 8 GPUs, and XLA."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8
    FLAGS.use_tensor_lr = True
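    # 'gpu_private' gives each GPU its own dedicated host threadpool, which
    # reduces thread contention on many-core machines.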
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 24
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, dynamic loss scaling, and tuned."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_optional_next(self):
    """Test Keras model with XLA, 8 GPUs and fp16.

    This test also enables get_next_as_optional.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_optional_next')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
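    # Exercises the distribution strategy input path that tolerates a
    # partial or missing final batch per replica.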
    FLAGS.enable_get_next_as_optional = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model with manual config tuning, XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked_delay_measure(self):
    """Test with manual config tuning, XLA, 8 GPUs and fp16.

    Delay performance measurement for stable performance on 96 vCPU platforms.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tweaked_delay_measure')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
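    # Extending the run pushes the measured window (the last 100 logged
    # batches; see _run_and_report_benchmark) later into the run.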
    FLAGS.train_steps = 310
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked_optional_next(self):
    """Test Keras model with manual config tuning, XLA, 8 GPUs, fp16.

    This test also enables get_next_as_optional.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tweaked_optional_next')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
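    # use_tensor_lr applies the learning rate schedule as a tensor rather
    # than via a per-batch Keras callback.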
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    FLAGS.enable_get_next_as_optional = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_slack(self):
    """Test Keras model with XLA, 8 GPUs and fp16.

    This test also enables tf.data's experimental_slack functionality.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_slack')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tensorboard_tweaked(self):
    """Test to track Tensorboard performance overhead."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tensorboard_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    FLAGS.enable_tensorboard = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test Keras model in legacy graph mode with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode, tuning, 8 GPUs, and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode with tuning, XLA, 8 GPUs, fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure(self):
    """Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.

    Delay performance measurement for stable performance on 96 vCPU platforms.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked_delay_measure')
    FLAGS.batch_size = 256 * 8
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.train_steps = 310
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked_optional_next(self):
    """Test in legacy graph mode with manual config tuning, XLA, 8 GPUs, fp16.

    This test also enables get_next_as_optional.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked_optional_next')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.enable_get_next_as_optional = True
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_slack(self):
    """Test legacy graph mode with tf.data's experimental_slack."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_slack')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(Resnet50KerasBenchmarkBase, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


class Resnet50KerasBenchmarkSynth(Resnet50KerasBenchmarkBase):
  """Resnet50 synthetic benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['use_synthetic_data'] = True
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkSynth, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class Resnet50KerasBenchmarkReal(Resnet50KerasBenchmarkBase):
  """Resnet50 real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkReal, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
  """Trivial model with real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    flag_methods = [keras_imagenet_main.define_imagenet_keras_flags]

    def_flags = {}
    def_flags['use_trivial_model'] = True
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['use_tensor_lr'] = True
    def_flags['dtype'] = 'fp16'
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 600
    def_flags['log_steps'] = 100
    def_flags['distribution_strategy'] = 'default'

    super(TrivialKerasBenchmarkReal, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=def_flags)

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(TrivialKerasBenchmarkReal, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)

  def benchmark_8_gpu_warmup(self):
    """Dummy test that runs over an epoch to warmup the machine."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_warmup')
    FLAGS.batch_size = 256 * 8
    FLAGS.train_steps = 700
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test trivial Keras model (input pipeline) with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test trivial Keras model (input pipeline) with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test trivial Keras model (input pipeline) with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test trivial Keras model with tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test trivial Keras model in legacy graph mode with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_tweaked(self):
    """Test trivial Keras model in legacy graph mode with tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.datasets_num_private_threads = 48
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(TrivialKerasBenchmarkReal, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


if __name__ == '__main__':
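  # These benchmark classes are usually driven by a harness such as PerfZero,
  # which instantiates them with output_dir/root_data_dir and calls individual
  # benchmark_* methods. A driver would do roughly (hypothetical output path):
  #   benchmark = Resnet50KerasBenchmarkSynth(output_dir='/tmp/benchmarks')
  #   benchmark.benchmark_1_gpu()
  # Running this file directly hands control to tf.test.main().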
  tf.test.main()