# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Executes Keras benchmarks and accuracy tests."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import time

from absl import flags
from absl import logging
from absl.testing import flagsaver
import tensorflow as tf

from official.recommendation import ncf_common
from official.recommendation import ncf_keras_main
from official.utils.flags import core
from official.utils.testing import benchmark_wrappers

FLAGS = flags.FLAGS
NCF_DATA_DIR_NAME = 'movielens_data'
NCF_TF_DATA_1M_BATCH_DIR_NAME = ('gs://tf-perfzero-data/movielens_data/'
                                 'ncf_8gpu_1M_batch')


class NCFKerasBenchmarkBase(tf.test.Benchmark):
  """Base class for NCF model benchmark."""
  local_flags = None

  def __init__(self,
               output_dir=None,
               default_flags=None,
               **kwargs):
    self.output_dir = output_dir
    self.default_flags = default_flags or {}
    # Run all benchmarks with ml_perf flag.
    self.default_flags['ml_perf'] = True

  def _setup(self):
    """Sets up and resets flags before each test."""
    assert tf.version.VERSION.startswith('2.')
    logging.set_verbosity(logging.INFO)
    if NCFKerasBenchmarkBase.local_flags is None:
      ncf_common.define_ncf_flags()
      # Loads flag defaults so they can be overridden; argv cannot be empty.
      flags.FLAGS(['foo'])
      core.set_defaults(**self.default_flags)
      saved_flag_values = flagsaver.save_flag_values()
      NCFKerasBenchmarkBase.local_flags = saved_flag_values
    else:
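      # Restore the flag defaults saved on the first run so each test starts
      # from the same state.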
      flagsaver.restore_flag_values(NCFKerasBenchmarkBase.local_flags)

  @benchmark_wrappers.enable_runtime_flags
  def _run_and_report_benchmark(self, hr_at_10_min=0, hr_at_10_max=0):
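    """Runs ncf_keras_main.run_ncf and reports wall time and metrics.

    Args:
      hr_at_10_min: Minimum acceptable hr@10; 0 skips the hr@10 bounds check.
      hr_at_10_max: Maximum acceptable hr@10.
    """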
    start_time_sec = time.time()
    stats = ncf_keras_main.run_ncf(FLAGS)
    wall_time_sec = time.time() - start_time_sec

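    # Each metric is a dict consumed by tf.test.Benchmark.report_benchmark:
    # a 'name', a 'value', and optional 'min_value'/'max_value' bounds.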
    metrics = []
    metrics.append({'name': 'exp_per_second',
                    'value': stats['avg_exp_per_second']})

    if hr_at_10_min > 0:
      metrics.append({'name': 'hr_at_10',
                      'value': stats['eval_hit_rate'],
                      'min_value': hr_at_10_min,
                      'max_value': hr_at_10_max})

      metrics.append({'name': 'train_loss',
                      'value': stats['loss']})

    self.report_benchmark(iters=-1, wall_time=wall_time_sec, metrics=metrics)


class NCFKerasAccuracy(NCFKerasBenchmarkBase):
  """Benchmark NCF model using real data."""

  def __init__(self,
               output_dir=None,
               root_data_dir=None,
               default_flags=None,
               **kwargs):
    root_data_dir = root_data_dir if root_data_dir else ''
    default_flags = {}
    default_flags['dataset'] = 'ml-20m'
    default_flags['num_gpus'] = 1
    default_flags['train_epochs'] = 10
    default_flags['clean'] = True
    default_flags['batch_size'] = 99000
    default_flags['learning_rate'] = 0.00382059
    default_flags['beta1'] = 0.783529
    default_flags['beta2'] = 0.909003
    default_flags['epsilon'] = 1.45439e-07
    default_flags['layers'] = [256, 256, 128, 64]
    default_flags['num_factors'] = 64
    default_flags['hr_threshold'] = 0.635
    default_flags['ml_perf'] = True
    default_flags['use_synthetic_data'] = False
    default_flags['data_dir'] = os.path.join(root_data_dir, NCF_DATA_DIR_NAME)

    super(NCFKerasAccuracy, self).__init__(
        output_dir=output_dir,
        default_flags=default_flags,
        **kwargs)

  def _run_and_report_benchmark_mlperf_like(self):
    """Run test and report results.

    Note: MLPerf-like tests are not tuned to hit a specific hr@10 value, but
    we want it recorded.
    """
    self._run_and_report_benchmark(hr_at_10_min=0.61)

  def _run_and_report_benchmark(self, hr_at_10_min=0.630, hr_at_10_max=0.645):
    """Run test and report results.

    Note: Target is 0.635, but some runs are below that level. Until we have
    multi-run tests, we have to accept a lower target.

    Args:
      hr_at_10_min: Minimum acceptable hr@10 value.
      hr_at_10_max: Maximum acceptable hr@10 value.
    """
    super(NCFKerasAccuracy, self)._run_and_report_benchmark(
        hr_at_10_min=hr_at_10_min,
        hr_at_10_max=hr_at_10_max)

  def benchmark_1_gpu_early_stop(self):
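    """1 GPU using keras fit/compile with early stopping."""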
    self._setup()
    FLAGS.early_stopping = True
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_early_stop(self):
    self._setup()
    FLAGS.distribution_strategy = 'off'
    FLAGS.early_stopping = True
    self._run_and_report_benchmark()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_early_stop(self):
    self._setup()
    FLAGS.distribution_strategy = 'off'
    FLAGS.early_stopping = True
    FLAGS.run_eagerly = True
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_early_stop(self):
    self._setup()
    FLAGS.early_stopping = True
    FLAGS.enable_xla = True
    self._run_and_report_benchmark()

  def benchmark_1_gpu_ctl_early_stop(self):
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.early_stopping = True
    self._run_and_report_benchmark()

  def benchmark_1_gpu_ctl_run_eagerly_early_stop(self):
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.early_stopping = True
    FLAGS.run_eagerly = True
    self._run_and_report_benchmark()

  def benchmark_xla_1_gpu_ctl_early_stop(self):
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.early_stopping = True
    FLAGS.enable_xla = True
    self._run_and_report_benchmark()

  def benchmark_2_gpus_early_stop(self):
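    """2 GPUs using keras fit/compile with early stopping."""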
    self._setup()
    FLAGS.early_stopping = True
    FLAGS.num_gpus = 2
    FLAGS.eval_batch_size = 160000
    self._run_and_report_benchmark()

  def benchmark_2_gpus_ctl_early_stop(self):
    """NCF with custom training loop. Works only in TF 2.0."""
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.early_stopping = True
    FLAGS.num_gpus = 2
    FLAGS.eval_batch_size = 160000
    self._run_and_report_benchmark()

#############################################
# Tests below with mlperf in the test name are of two types:
#  1) 1 GPU tests are based on the TensorFlow MLPerf 0.5 submission.
#  2) 8 GPU tests are based on MLPerf 0.5 and use NVIDIA's hyperparameters.
#
# The purpose of both is to get a number to compare to existing results. To do
# this, the number of epochs is held constant rather than racing to a given
# accuracy. Accuracy validation is done by the "early_stop" tests.
#############################################

  def benchmark_1_gpu_mlperf_like(self):
    """1 GPU using keras fit/compile."""
    self._setup()
    FLAGS.train_epochs = 7
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_1_gpu_no_dist_strat_mlperf_like(self):
    """1 GPU using compile/fit without dist_strat."""
    self._setup()
    FLAGS.train_epochs = 7
    FLAGS.distribution_strategy = 'off'
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_1_gpu_no_dist_strat_run_eagerly_mlperf_like(self):
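    """1 GPU using compile/fit without dist_strat, running eagerly."""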
    self._setup()
    FLAGS.train_epochs = 7
    FLAGS.distribution_strategy = 'off'
    FLAGS.run_eagerly = True
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_xla_1_gpu_mlperf_like(self):
    """1 GPU using compile/fit with XLA."""
    self._setup()
    FLAGS.train_epochs = 7
    FLAGS.enable_xla = True
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_1_gpu_ctl_mlperf_like(self):
    """1 GPU using CTL."""
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.train_epochs = 7
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_1_gpu_ctl_fp16_mlperf_like(self):
    """1 GPU using CTL and FP16."""
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.train_epochs = 7
    FLAGS.dtype = 'fp16'
    FLAGS.loss_scale = 8192
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_1_gpu_fp16_mlperf_like(self):
    """1 GPU using FP16."""
    self._setup()
    FLAGS.train_epochs = 7
    FLAGS.dtype = 'fp16'
    FLAGS.loss_scale = 8192
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_1_gpu_ctl_fp16_graph_rewrite_mlperf_like(self):
    """1 GPU using CTL and FP16 graph rewrite."""
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.train_epochs = 7
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.loss_scale = 8192
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_1_gpu_fp16_graph_rewrite_mlperf_like(self):
    """1 GPU using FP16 graph rewrite."""
    self._setup()
    FLAGS.train_epochs = 7
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.loss_scale = 8192
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_1_gpu_ctl_run_eagerly_mlperf_like(self):
    """1 GPU using CTL with eager and distribution strategy."""
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.run_eagerly = True
    FLAGS.train_epochs = 7
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_xla_1_gpu_ctl_mlperf_like(self):
    """1 GPU using CTL with XLA."""
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.enable_xla = True
    FLAGS.train_epochs = 7
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_xla_1_gpu_fp16_mlperf_like(self):
    """1 GPU using with XLA and FP16."""
    self._setup()
    FLAGS.enable_xla = True
    FLAGS.train_epochs = 7
    FLAGS.dtype = 'fp16'
    FLAGS.loss_scale = 8192
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_xla_1_gpu_ctl_fp16_mlperf_like(self):
    """1 GPU using CTL with XLA and FP16."""
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.enable_xla = True
    FLAGS.train_epochs = 7
    FLAGS.dtype = 'fp16'
    FLAGS.loss_scale = 8192
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_8_gpu_mlperf_like(self):
    """8 GPU using keras fit/compile."""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.train_epochs = 17
    FLAGS.batch_size = 1048576
    FLAGS.eval_batch_size = 160000
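    # Optimizer hyperparameters below follow NVIDIA's MLPerf 0.5 8-GPU
    # submission (see the section comment above).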
    FLAGS.learning_rate = 0.0045
    FLAGS.beta1 = 0.25
    FLAGS.beta2 = 0.5
    FLAGS.epsilon = 1e-8
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_8_gpu_ctl_mlperf_like(self):
    """8 GPU using CTL."""
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.num_gpus = 8
    FLAGS.train_epochs = 17
    FLAGS.batch_size = 1048576
    FLAGS.eval_batch_size = 160000
    FLAGS.learning_rate = 0.0045
    FLAGS.beta1 = 0.25
    FLAGS.beta2 = 0.5
    FLAGS.epsilon = 1e-8
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_8_gpu_tf_data_ctl_mlperf_like(self):
    """8 GPU using CTL."""
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.num_gpus = 8
    FLAGS.train_epochs = 17
    FLAGS.batch_size = 1048576
    FLAGS.eval_batch_size = 1048000
    FLAGS.learning_rate = 0.0045
    FLAGS.beta1 = 0.25
    FLAGS.beta2 = 0.5
    FLAGS.epsilon = 1e-8
    FLAGS.train_dataset_path = os.path.join(NCF_TF_DATA_1M_BATCH_DIR_NAME,
                                            'training_cycle_*/*')
    FLAGS.eval_dataset_path = os.path.join(NCF_TF_DATA_1M_BATCH_DIR_NAME,
                                           'eval_data/*')
    FLAGS.input_meta_data_path = os.path.join(NCF_TF_DATA_1M_BATCH_DIR_NAME,
                                              'meta_data.json')
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_8_gpu_tf_data_fp16_mlperf_like(self):
    """8 GPU FP16"""
    self._setup()
    FLAGS.num_gpus = 8
    FLAGS.train_epochs = 17
    FLAGS.batch_size = 1048576
    FLAGS.eval_batch_size = 1048000
    FLAGS.learning_rate = 0.0045
    FLAGS.beta1 = 0.25
    FLAGS.beta2 = 0.5
    FLAGS.epsilon = 1e-8
    FLAGS.dtype = 'fp16'
    FLAGS.loss_scale = 8192
    FLAGS.train_dataset_path = os.path.join(NCF_TF_DATA_1M_BATCH_DIR_NAME,
                                            'training_cycle_*/*')
    FLAGS.eval_dataset_path = os.path.join(NCF_TF_DATA_1M_BATCH_DIR_NAME,
                                           'eval_data/*')
    FLAGS.input_meta_data_path = os.path.join(NCF_TF_DATA_1M_BATCH_DIR_NAME,
                                              'meta_data.json')
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_8_gpu_tf_data_ctl_fp16_mlperf_like(self):
    """8 GPU FP16 using CTL"""
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.num_gpus = 8
    FLAGS.train_epochs = 17
    FLAGS.batch_size = 1048576
    FLAGS.eval_batch_size = 1048000
    FLAGS.learning_rate = 0.0045
    FLAGS.beta1 = 0.25
    FLAGS.beta2 = 0.5
    FLAGS.epsilon = 1e-8
    FLAGS.dtype = 'fp16'
    FLAGS.loss_scale = 8192
    FLAGS.train_dataset_path = os.path.join(NCF_TF_DATA_1M_BATCH_DIR_NAME,
                                            'training_cycle_*/*')
    FLAGS.eval_dataset_path = os.path.join(NCF_TF_DATA_1M_BATCH_DIR_NAME,
                                           'eval_data/*')
    FLAGS.input_meta_data_path = os.path.join(NCF_TF_DATA_1M_BATCH_DIR_NAME,
                                              'meta_data.json')
    self._run_and_report_benchmark_mlperf_like()

  def benchmark_8_gpu_tf_data_ctl_fp16_graph_rewrite_mlperf_like(self):
    """8 GPU FP16 graph rewrite using CTL."""
    self._setup()
    FLAGS.keras_use_ctl = True
    FLAGS.num_gpus = 8
    FLAGS.train_epochs = 17
    FLAGS.batch_size = 1048576
    FLAGS.eval_batch_size = 1048000
    FLAGS.learning_rate = 0.0045
    FLAGS.beta1 = 0.25
    FLAGS.beta2 = 0.5
    FLAGS.epsilon = 1e-8
    FLAGS.dtype = 'fp16'
    FLAGS.fp16_implementation = 'graph_rewrite'
    FLAGS.loss_scale = 8192
    FLAGS.train_dataset_path = os.path.join(NCF_TF_DATA_1M_BATCH_DIR_NAME,
                                            'training_cycle_*/*')
    FLAGS.eval_dataset_path = os.path.join(NCF_TF_DATA_1M_BATCH_DIR_NAME,
                                           'eval_data/*')
    FLAGS.input_meta_data_path = os.path.join(NCF_TF_DATA_1M_BATCH_DIR_NAME,
                                              'meta_data.json')
    self._run_and_report_benchmark_mlperf_like()


class NCFKerasSynth(NCFKerasBenchmarkBase):
  """Benchmark NCF model using synthetic data."""

  def __init__(self,
               output_dir=None,
               default_flags=None,
               **kwargs):

    default_flags = {}
    default_flags['dataset'] = 'ml-20m'
    default_flags['num_gpus'] = 1
    default_flags['train_epochs'] = 8
    default_flags['batch_size'] = 99000
    default_flags['eval_batch_size'] = 160000
    default_flags['learning_rate'] = 0.00382059
    default_flags['beta1'] = 0.783529
    default_flags['beta2'] = 0.909003
    default_flags['epsilon'] = 1.45439e-07
    default_flags['layers'] = [256, 256, 128, 64]
    default_flags['num_factors'] = 64
    default_flags['hr_threshold'] = 0.635
    default_flags['use_synthetic_data'] = True

    super(NCFKerasSynth, self).__init__(
        output_dir=output_dir,
        default_flags=default_flags,
        **kwargs)

  def benchmark_1_gpu(self):
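    """1 GPU using synthetic data."""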
    self._setup()
    self._run_and_report_benchmark()

  def benchmark_2_gpus(self):
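    """2 GPUs using synthetic data."""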
    self._setup()
    FLAGS.num_gpus = 2
    self._run_and_report_benchmark()
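
# These benchmarks are typically driven by an external harness (e.g. the
# PerfZero runner), which constructs one of the classes above and invokes
# individual benchmark_* methods.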


if __name__ == '__main__':
  tf.test.main()