# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes Keras benchmarks and accuracy tests."""
from __future__ import print_function

import os
import time

from absl import flags
import tensorflow as tf  # pylint: disable=g-bad-import-order

from official.resnet import imagenet_main
from official.resnet.keras import keras_benchmark
from official.resnet.keras import keras_common
from official.resnet.keras import keras_imagenet_main

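# Expected top-1 validation accuracy band for a full 90-epoch ResNet50 run on
# ImageNet; results outside this band are flagged when the accuracy tests
# report via _report_benchmark below.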
MIN_TOP_1_ACCURACY = 0.76
MAX_TOP_1_ACCURACY = 0.77

FLAGS = flags.FLAGS
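# These classes are normally driven by a benchmark harness (e.g. PerfZero)
# that instantiates one of them and invokes benchmark_* methods by name. A
# minimal sketch of manual invocation (the output_dir value is hypothetical):
#
#   benchmark = Resnet50KerasBenchmarkSynth(output_dir='/tmp/keras_bench')
#   benchmark.benchmark_1_gpu()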


class Resnet50KerasAccuracy(keras_benchmark.KerasBenchmark):
  """Benchmark accuracy tests for ResNet50 in Keras."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    """A benchmark class.

    Args:
      output_dir: directory where outputs (e.g. log files) are written.
      root_data_dir: directory under which to look for the dataset.
      **kwargs: arbitrary named arguments. This is needed to make the
                constructor forward compatible in case PerfZero provides more
                named arguments before updating the constructor.
    """

    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]

    self.data_dir = os.path.join(root_data_dir, 'imagenet')
    super(Resnet50KerasAccuracy, self).__init__(
        output_dir=output_dir, flag_methods=flag_methods)

  def benchmark_graph_8_gpu(self):
    """Test Keras model with Keras fit/dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.dtype = 'fp32'
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with eager, dist_strat and 8 GPUs."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 128 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.dtype = 'fp32'
    FLAGS.enable_eager = True
    # Add some thread tunings to improve performance.
    FLAGS.datasets_num_private_threads = 14
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with eager, dist_strat, 8 GPUs, and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
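    # fp16 roughly halves activation memory, so the fp16 runs use a larger
    # per-GPU batch (256 here vs. 128 in the fp32 tests above).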
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs and fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, eager, dist_strat, 8 GPUs, dynamic fp16."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.data_dir = self.data_dir
    FLAGS.batch_size = 256 * 8
    FLAGS.train_epochs = 90
    FLAGS.epochs_between_evals = 10
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.loss_scale = 'dynamic'
    # Thread tuning to improve performance.
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(Resnet50KerasAccuracy, self)._report_benchmark(
        stats,
        wall_time_sec,
        top_1_min=MIN_TOP_1_ACCURACY,
        top_1_max=MAX_TOP_1_ACCURACY,
        total_batch_size=FLAGS.batch_size,
        log_steps=100)

  def _get_model_dir(self, folder_name):
    return os.path.join(self.output_dir, folder_name)


class Resnet50KerasBenchmarkBase(keras_benchmark.KerasBenchmark):
  """Resnet50 benchmarks."""

  def __init__(self, output_dir=None, default_flags=None):
    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]

    super(Resnet50KerasBenchmarkBase, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=default_flags)

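  # Note: wall_time_sec covers the entire run() call, including startup and
  # input-pipeline warmup, not just steady-state training steps.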
  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(Resnet50KerasBenchmarkBase, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)

  def benchmark_1_gpu_no_dist_strat(self):
    """Test Keras model with 1 GPU, no distribution strategy."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_no_dist_strat')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_no_dist_strat(self):
    """Test Keras model in legacy graph mode with 1 GPU, no dist strat."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'off'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_no_dist_strat')
    FLAGS.batch_size = 96  # BatchNorm is less efficient in legacy graph mode
                           # due to its reliance on v1 cond.
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test Keras model with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu(self):
    """Test Keras model with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16(self):
    """Test Keras model with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_1_gpu_fp16_dynamic(self):
    """Test Keras model with 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16(self):
    """Test Keras model with XLA, 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model with XLA, 1 GPU, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_slack(self):
    """Test Keras model with XLA, 1 GPU, fp16, and tf.data's experimental_slack
       functionality.
    """
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_slack')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_fp16_dynamic(self):
    """Test Keras model with XLA, 1 GPU, fp16, and dynamic loss scaling."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_1_gpu_fp16_dynamic')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.loss_scale = 'dynamic'
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test Keras model in legacy graph mode with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu')
    FLAGS.batch_size = 128
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU and fp16."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16 and XLA."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_1_gpu_fp16')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16, XLA, and manual
       config tuning.
    """
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_tweaked')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_1_gpu_fp16_slack(self):
    """Test Keras model in legacy graph mode with 1 GPU, fp16, XLA, and
       tf.data's experimental_slack functionality.
    """
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_1_gpu_fp16_slack')
    FLAGS.dtype = 'fp16'
    FLAGS.batch_size = 256
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test Keras model with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_cloning(self):
    """Test Keras model with 8 GPUs and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_cloning')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test Keras model with manual config tuning and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.datasets_num_private_threads = 14
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_slack(self):
    """Test Keras model with tf.data's experimental_slack and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_slack')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu(self):
    """Test Keras model with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16(self):
    """Test Keras model with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_cloning(self):
    """Test Keras model with 8 GPUs, fp16 and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_cloning')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, and manual config tuning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with 8 GPUs, fp16, dynamic loss scaling, and manual
       config tuning.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16(self):
    """Test Keras model with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_cloning(self):
    """Test Keras model with XLA, 8 GPUs, fp16 and cloning."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.clone_model_in_keras_dist_strat = True
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_cloning')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model with manual config tuning, XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    # FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_slack(self):
    """Test Keras model with tf.data's experimental_slack functionality, XLA,
       8 GPUs and fp16.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_xla_8_gpu_fp16_slack')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test Keras model with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_xla_8_gpu_fp16_tensorboard_tweaked(self):
    """Test to track Tensorboard performance overhead."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = True
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_xla_8_gpu_fp16_tensorboard_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    FLAGS.enable_tensorboard = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test Keras model in legacy graph mode with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu(self):
    """Test Keras model in legacy graph mode with XLA and 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu')
    FLAGS.batch_size = 128 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16(self):
    """Test Keras model in legacy graph mode with XLA, 8 GPUs and fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_xla_8_gpu_fp16')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode with manual config tuning, 8 GPUs
       and fp16.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_tweaked(self):
    """Test Keras model in legacy graph mode with manual config tuning, XLA,
       8 GPUs and fp16.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_slack(self):
    """Test Keras model in legacy graph mode with tf.data's experimental_slack
       functionality, XLA, 8 GPUs and fp16.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_slack')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.loss_scale = 'dynamic'
    FLAGS.use_tensor_lr = True
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked(self):
    """Test graph Keras with config tuning, XLA, 8 GPUs and dynamic fp16."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.dtype = 'fp16'
    FLAGS.enable_eager = False
    FLAGS.enable_xla = True
    FLAGS.distribution_strategy = 'default'
    FLAGS.model_dir = self._get_model_dir(
        'benchmark_graph_xla_8_gpu_fp16_dynamic_tweaked')
    FLAGS.batch_size = 256 * 8  # 8 GPUs
    FLAGS.use_tensor_lr = True
    FLAGS.loss_scale = 'dynamic'
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(Resnet50KerasBenchmarkBase, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


class Resnet50KerasBenchmarkSynth(Resnet50KerasBenchmarkBase):
  """Resnet50 synthetic benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
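    # Synthetic data and skipped eval keep I/O and evaluation out of the
    # measurement, so these runs isolate training-step throughput.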
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['use_synthetic_data'] = True
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkSynth, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class Resnet50KerasBenchmarkReal(Resnet50KerasBenchmarkBase):
  """Resnet50 real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 110
    def_flags['log_steps'] = 10

    super(Resnet50KerasBenchmarkReal, self).__init__(
        output_dir=output_dir, default_flags=def_flags)


class TrivialKerasBenchmarkReal(keras_benchmark.KerasBenchmark):
  """Trivial model with real data benchmark tests."""

  def __init__(self, output_dir=None, root_data_dir=None, **kwargs):
    flag_methods = [
        keras_common.define_keras_flags,
        lambda: imagenet_main.define_imagenet_flags(dynamic_loss_scale=True)
    ]
    def_flags = {}
    def_flags['skip_eval'] = True
    def_flags['report_accuracy_metrics'] = False
    def_flags['dtype'] = 'fp16'
    def_flags['enable_xla'] = True
    def_flags['data_dir'] = os.path.join(root_data_dir, 'imagenet')
    def_flags['train_steps'] = 600
    def_flags['log_steps'] = 100
    def_flags['distribution_strategy'] = 'default'

    super(TrivialKerasBenchmarkReal, self).__init__(
        output_dir=output_dir,
        flag_methods=flag_methods,
        default_flags=def_flags)

  def _run_and_report_benchmark(self):
    start_time_sec = time.time()
    stats = keras_imagenet_main.run(FLAGS)
    wall_time_sec = time.time() - start_time_sec

    super(TrivialKerasBenchmarkReal, self)._report_benchmark(
        stats,
        wall_time_sec,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)

  def benchmark_8_gpu_warmup(self):
    """Dummy test that runs over an epoch to warmup the machine."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_warmup')
    FLAGS.batch_size = 256
    FLAGS.train_steps = 700
    self._run_and_report_benchmark()

  def benchmark_1_gpu(self):
    """Test trivial Keras model (input pipeline) with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_graph_1_gpu(self):
    """Test trivial Keras model (input pipeline) with 1 GPU."""
    self._setup()

    FLAGS.num_gpus = 1
    FLAGS.enable_eager = False
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_1_gpu')
    FLAGS.batch_size = 256
    self._run_and_report_benchmark()

  def benchmark_8_gpu(self):
    """Test trivial Keras model (input pipeline) with 8 GPUs."""
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_8_gpu_tweaked(self):
    """Test trivial Keras model (input pipeline) with manual config tuning and
       8 GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    FLAGS.data_delay_prefetch = True
    self._run_and_report_benchmark()

  def benchmark_8_gpu_slack(self):
    """Test trivial Keras model (input pipeline) with tf.data's
       experimental_slack and 8 GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = True
    FLAGS.model_dir = self._get_model_dir('benchmark_8_gpu_slack')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_data_experimental_slack = True
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu(self):
    """Test trivial Keras model (input pipeline) in legacy graph mode with 8
       GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu')
    FLAGS.batch_size = 256 * 8
    self._run_and_report_benchmark()

  def benchmark_graph_8_gpu_tweaked(self):
    """Test trivial Keras model (input pipeline) in legacy graph mode with
       manual config tuning and 8 GPUs.
    """
    self._setup()

    FLAGS.num_gpus = 8
    FLAGS.enable_eager = False
    FLAGS.model_dir = self._get_model_dir('benchmark_graph_8_gpu_tweaked')
    FLAGS.batch_size = 256 * 8
    FLAGS.tf_gpu_thread_mode = 'gpu_private'
    self._run_and_report_benchmark()

  def fill_report_object(self, stats):
    super(TrivialKerasBenchmarkReal, self).fill_report_object(
        stats,
        total_batch_size=FLAGS.batch_size,
        log_steps=FLAGS.log_steps)


if __name__ == '__main__':
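  # tf.test.main() parses flags; individual benchmark_* methods are selected
  # by the harness (or, in typical TF benchmark setups, via a --benchmarks
  # regex).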
  tf.test.main()