# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common util functions and classes used by both keras cifar and imagenet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import multiprocessing
import os

from absl import flags
import numpy as np
import tensorflow as tf

from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from official.utils.flags import core as flags_core
from official.utils.misc import keras_utils

FLAGS = flags.FLAGS
BASE_LEARNING_RATE = 0.1  # This matches Jing's version.
TRAIN_TOP_1 = 'training_accuracy_top_1'
LR_SCHEDULE = [    # (multiplier, epoch to start) tuples
    (1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]


def learning_rate_schedule(current_epoch,
                           current_batch,
                           batches_per_epoch,
                           batch_size):
  """Handles linear scaling rule, gradual warmup, and LR decay.

  Scale learning rate at epoch boundaries provided in LR_SCHEDULE by the
  provided scaling factor.

  Args:
    current_epoch: integer, current epoch indexed from 0.
    current_batch: integer, current batch in the current epoch, indexed from 0.
    batches_per_epoch: integer, number of steps in an epoch.
    batch_size: integer, total batch size.

  Returns:
    Adjusted learning rate.
  """
  initial_lr = BASE_LEARNING_RATE * batch_size / 256
  epoch = current_epoch + float(current_batch) / batches_per_epoch
  warmup_lr_multiplier, warmup_end_epoch = LR_SCHEDULE[0]
  if epoch < warmup_end_epoch:
    # Learning rate increases linearly per step.
    return initial_lr * warmup_lr_multiplier * epoch / warmup_end_epoch
  for mult, start_epoch in LR_SCHEDULE:
    if epoch >= start_epoch:
      learning_rate = initial_lr * mult
    else:
      break
  return learning_rate
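
# Illustrative usage sketch (comment only, not executed; the sizes are
# hypothetical): with a global batch size of 1024 the scaled base rate is
# 0.1 * 1024 / 256 = 0.4; the rate ramps linearly toward 0.4 over the 5 warmup
# epochs and then follows the step decays in LR_SCHEDULE:
#
#   learning_rate_schedule(current_epoch=2, current_batch=0,
#                          batches_per_epoch=100, batch_size=1024)  # -> 0.16
#   learning_rate_schedule(current_epoch=30, current_batch=0,
#                          batches_per_epoch=100, batch_size=1024)  # -> 0.04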


class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
  """Callback to update learning rate on every batch (not epoch boundaries).

  N.B. Only supports Keras optimizers, not TF optimizers.

  Attributes:
      schedule: a function that takes an epoch index and a batch index as input
          (both integer, indexed from 0) and returns a new learning rate as
          output (float).
  """

  def __init__(self, schedule, batch_size, num_images):
    super(LearningRateBatchScheduler, self).__init__()
    self.schedule = schedule
    self.batches_per_epoch = num_images / batch_size
    self.batch_size = batch_size
    self.epochs = -1
    self.prev_lr = -1

  def on_epoch_begin(self, epoch, logs=None):
    """Executes before an epoch begins."""
    if not hasattr(self.model.optimizer, 'learning_rate'):
      raise ValueError('Optimizer must have a "learning_rate" attribute.')
    self.epochs += 1

  def on_batch_begin(self, batch, logs=None):
    """Executes before step begins."""
    lr = self.schedule(self.epochs,
                       batch,
                       self.batches_per_epoch,
                       self.batch_size)
    if not isinstance(lr, (float, np.float32, np.float64)):
      raise ValueError('The output of the "schedule" function should be float.')
    if lr != self.prev_lr:
      self.model.optimizer.learning_rate = lr  # lr should be a float here
      self.prev_lr = lr
      tf.compat.v1.logging.debug(
          'Epoch %05d Batch %05d: LearningRateBatchScheduler '
          'change learning rate to %s.', self.epochs, batch, lr)
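
# Illustrative usage sketch (comment only, not executed; `model`,
# `train_dataset`, and the sizes are hypothetical): the callback is attached to
# Keras fit() so the schedule above runs once per batch instead of once per
# epoch:
#
#   lr_callback = LearningRateBatchScheduler(
#       schedule=learning_rate_schedule, batch_size=256, num_images=50000)
#   model.fit(train_dataset, epochs=90, callbacks=[lr_callback])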


class PiecewiseConstantDecayWithWarmup(
    tf.keras.optimizers.schedules.LearningRateSchedule):
  """Piecewise constant decay with warmup schedule."""

  def __init__(self, batch_size, epoch_size, warmup_epochs, boundaries,
               multipliers, compute_lr_on_cpu=True, name=None):
    super(PiecewiseConstantDecayWithWarmup, self).__init__()
    if len(boundaries) != len(multipliers) - 1:
      raise ValueError('The length of boundaries must be 1 less than the '
                       'length of multipliers')

    base_lr_batch_size = 256
    num_batches_per_epoch = epoch_size // batch_size

    self.rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
    self.step_boundaries = [float(num_batches_per_epoch) * x
                            for x in boundaries]
    self.lr_values = [self.rescaled_lr * m for m in multipliers]
    self.warmup_steps = warmup_epochs * num_batches_per_epoch
    self.compute_lr_on_cpu = compute_lr_on_cpu
    self.name = name

    self.learning_rate_ops_cache = {}

  def __call__(self, step):
    if tf.executing_eagerly():
      return self._get_learning_rate(step)

    # In an eager function or graph, the current implementation of the
    # optimizer repeatedly calls and thus creates ops for the learning rate
    # schedule. To avoid this, we cache the ops if not executing eagerly.
    graph = tf.compat.v1.get_default_graph()
    if graph not in self.learning_rate_ops_cache:
      if self.compute_lr_on_cpu:
        with tf.device('/device:CPU:0'):
          self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
      else:
        self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
    return self.learning_rate_ops_cache[graph]

  def _get_learning_rate(self, step):
    """Compute learning rate at given step."""
    with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup',
                                 [self.rescaled_lr, self.step_boundaries,
                                  self.lr_values, self.warmup_steps,
                                  self.compute_lr_on_cpu]):
      def warmup_lr(step):
        return self.rescaled_lr * (
            tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32))
      def piecewise_lr(step):
        return tf.compat.v1.train.piecewise_constant(
            step, self.step_boundaries, self.lr_values)
      return tf.cond(step < self.warmup_steps,
                     lambda: warmup_lr(step),
                     lambda: piecewise_lr(step))

  def get_config(self):
    return {
        'rescaled_lr': self.rescaled_lr,
        'step_boundaries': self.step_boundaries,
        'lr_values': self.lr_values,
        'warmup_steps': self.warmup_steps,
        'compute_lr_on_cpu': self.compute_lr_on_cpu,
        'name': self.name
    }
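
# Illustrative usage sketch (comment only, not executed; the boundary and
# multiplier values are hypothetical, not the flag defaults): the schedule is
# built from epoch boundaries and handed directly to a Keras optimizer, so no
# per-batch callback is needed:
#
#   lr_schedule = PiecewiseConstantDecayWithWarmup(
#       batch_size=256, epoch_size=1281167, warmup_epochs=5,
#       boundaries=[30, 60, 80], multipliers=[1.0, 0.1, 0.01, 0.001])
#   optimizer = tf.keras.optimizers.SGD(learning_rate=lr_schedule, momentum=0.9)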


def set_gpu_thread_mode_and_count(flags_obj):
  """Set GPU thread mode and count, and adjust dataset threads count."""
  cpu_count = multiprocessing.cpu_count()
  tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count)

  # Allocate private thread pool for each GPU to schedule and launch kernels
  per_gpu_thread_count = flags_obj.per_gpu_thread_count or 2
  os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode
  os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
  tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s',
                            os.environ['TF_GPU_THREAD_COUNT'])
  tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s',
                            os.environ['TF_GPU_THREAD_MODE'])

  # Limit data preprocessing threadpool to CPU cores minus number of total GPU
  # private threads and memory copy threads.
  total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus
  num_runtime_threads = flags_obj.num_gpus
  if not flags_obj.datasets_num_private_threads:
    flags_obj.datasets_num_private_threads = min(
        cpu_count - total_gpu_thread_count - num_runtime_threads,
        flags_obj.num_gpus * 8)
    tf.compat.v1.logging.info('Set datasets_num_private_threads to %s',
                              flags_obj.datasets_num_private_threads)
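
# Worked example (hypothetical values): with 8 GPUs, 2 private threads per GPU,
# and 96 logical cores, the function above exports TF_GPU_THREAD_MODE and
# TF_GPU_THREAD_COUNT=2, and, if the flag is unset, caps
# datasets_num_private_threads at min(96 - 8 * 2 - 8, 8 * 8) = 64.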


def get_optimizer(learning_rate=0.1):
  """Returns optimizer to use."""
  # The learning_rate is overwritten at the beginning of each step by the
  # callback.
  return gradient_descent_v2.SGD(learning_rate=learning_rate, momentum=0.9)


# TODO(hongkuny,haoyuzhang): make cifar model use_tensor_lr to clean up code.
def get_callbacks(learning_rate_schedule_fn=None, num_images=None):
  """Returns common callbacks."""
  time_callback = keras_utils.TimeHistory(FLAGS.batch_size, FLAGS.log_steps)
  callbacks = [time_callback]

  if not FLAGS.use_tensor_lr and learning_rate_schedule_fn:
    lr_callback = LearningRateBatchScheduler(
        learning_rate_schedule_fn,
        batch_size=FLAGS.batch_size,
        num_images=num_images)
    callbacks.append(lr_callback)

  if FLAGS.enable_tensorboard:
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=FLAGS.model_dir)
    callbacks.append(tensorboard_callback)

  if FLAGS.profile_steps:
    profiler_callback = keras_utils.get_profiler_callback(
        FLAGS.model_dir,
        FLAGS.profile_steps,
        FLAGS.enable_tensorboard)
    callbacks.append(profiler_callback)

  return callbacks
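
# Illustrative usage sketch (comment only, not executed; `model` and
# `train_dataset` are hypothetical objects from the training driver):
#
#   callbacks = get_callbacks(learning_rate_schedule, num_images=50000)
#   history = model.fit(train_dataset, epochs=FLAGS.train_epochs,
#                       callbacks=callbacks)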


def build_stats(history, eval_output, callbacks):
  """Normalizes and returns dictionary of stats.

  Args:
    history: Results of the training step. Supports both categorical_accuracy
      and sparse_categorical_accuracy.
    eval_output: Output of the eval step. Assumes first value is eval_loss and
      second value is accuracy_top_1.
    callbacks: a list of callbacks which might include a time history callback
      used during keras.fit.

  Returns:
    Dictionary of normalized results.
  """
  stats = {}
  if eval_output:
    stats['accuracy_top_1'] = eval_output[1].item()
    stats['eval_loss'] = eval_output[0].item()

  if history and history.history:
    train_hist = history.history
    # Gets final loss from training.
    stats['loss'] = train_hist['loss'][-1].item()
    # Gets top_1 training accuracy.
    if 'categorical_accuracy' in train_hist:
      stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
    elif 'sparse_categorical_accuracy' in train_hist:
      stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()

  if not callbacks:
    return stats

  # Look for the time history callback which was used during keras.fit
  for callback in callbacks:
    if isinstance(callback, keras_utils.TimeHistory):
      timestamp_log = callback.timestamp_log
      stats['step_timestamp_log'] = timestamp_log
      stats['train_finish_time'] = callback.train_finish_time
      if len(timestamp_log) > 1:
        stats['avg_exp_per_second'] = (
            callback.batch_size * callback.log_steps *
            (len(callback.timestamp_log)-1) /
            (timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
  return stats
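
# Illustrative usage sketch (comment only, not executed; `model`,
# `eval_dataset`, `history`, and `callbacks` are hypothetical objects from the
# training driver):
#
#   eval_output = model.evaluate(eval_dataset)
#   stats = build_stats(history, eval_output, callbacks)
#   # e.g. stats['accuracy_top_1'], stats['loss'], stats['avg_exp_per_second']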


def define_keras_flags(dynamic_loss_scale=True):
  """Define flags for Keras models."""
  flags_core.define_base(clean=True, num_gpu=True, run_eagerly=True,
                         train_epochs=True, epochs_between_evals=True,
                         distribution_strategy=True)
  flags_core.define_performance(num_parallel_calls=False,
                                synthetic_data=True,
                                dtype=True,
                                all_reduce_alg=True,
                                num_packs=True,
                                tf_gpu_thread_mode=True,
                                datasets_num_private_threads=True,
                                dynamic_loss_scale=dynamic_loss_scale,
                                loss_scale=True,
                                fp16_implementation=True,
                                tf_data_experimental_slack=True,
                                enable_xla=True,
                                force_v2_in_keras_compile=True)
  flags_core.define_image()
  flags_core.define_benchmark()
  flags_core.define_distribution()
  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
  flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
  # TODO(b/135607288): Remove this flag once we understand the root cause of
  # slowdown when setting the learning phase in Keras backend.
  flags.DEFINE_boolean(
      name='set_learning_phase_to_train', default=True,
      help='If skip eval, also set Keras learning phase to 1 (training).')
  flags.DEFINE_boolean(
      name='explicit_gpu_placement', default=False,
      help='If not using distribution strategy, explicitly set device scope '
      'for the Keras training loop.')
  flags.DEFINE_boolean(name='use_trivial_model', default=False,
                       help='Whether to use a trivial Keras model.')
  flags.DEFINE_boolean(name='report_accuracy_metrics', default=True,
                       help='Report metrics during training and evaluation.')
  flags.DEFINE_boolean(name='use_tensor_lr', default=False,
                       help='Use learning rate tensor instead of a callback.')
  flags.DEFINE_boolean(
      name='enable_tensorboard', default=False,
      help='Whether to enable Tensorboard callback.')
  flags.DEFINE_integer(
      name='train_steps', default=None,
      help='The number of steps to run for training. If it is larger than '
      '# batches per epoch, then use # batches per epoch. When this flag is '
      'set, only one epoch is going to run for training.')
  flags.DEFINE_string(
      name='profile_steps', default=None,
      help='Save profiling data to model dir at given range of steps. The '
      'value must be a comma separated pair of positive integers, specifying '
      'the first and last step to profile. For example, "--profile_steps=2,4" '
      'triggers the profiler to process 3 steps, starting from the 2nd step. '
      'Note that profiler has a non-trivial performance overhead, and the '
      'output file can be gigantic if profiling many steps.')
  flags.DEFINE_boolean(
      name='data_delay_prefetch', default=False,
      help='Add a small delay in tf.data prefetch to prioritize memory copy of '
      'other tensors over the data minibatch for the (T+1)th step. It should '
      'help improve performance using EagerIterator and function. The codepath '
      'when enabling this feature is experimental and will be removed once the '
      'corresponding performance features are fully supported in TensorFlow.')
  flags.DEFINE_boolean(
      name='batchnorm_spatial_persistent', default=True,
      help='Enable the spatial persistent mode for CuDNN batch norm kernel.')
  flags.DEFINE_boolean(
      name='enable_get_next_as_optional', default=False,
      help='Enable get_next_as_optional behavior in DistributedIterator.')
  flags.DEFINE_boolean(
      name='enable_checkpoint_and_export', default=False,
      help='Whether to enable a checkpoint callback and export the savedmodel.')
  flags.DEFINE_string(
      name='tpu', default='', help='TPU address to connect to.')
  flags.DEFINE_integer(
      name='steps_per_loop', default=1,
      help='Number of steps per graph-mode loop. Only training step happens '
      'inside the loop. Callbacks will not be called inside. Will be capped at '
      'steps per epoch.')


def get_synth_input_fn(height, width, num_channels, num_classes,
                       dtype=tf.float32, drop_remainder=True):
  """Returns an input function that returns a dataset with random data.

  This input_fn returns a dataset that iterates over a set of random data and
  bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
  copy is still included. This is used to find the upper throughput bound when
  tuning the full input pipeline.

  Args:
    height: Integer height that will be used to create a fake image tensor.
    width: Integer width that will be used to create a fake image tensor.
    num_channels: Integer depth that will be used to create a fake image tensor.
    num_classes: Number of classes that should be represented in the fake labels
      tensor.
    dtype: Data type for features/images.
    drop_remainder: A boolean indicating whether to drop the remainder of the
      batches. If True, the batch dimension will be static.

  Returns:
    An input_fn that can be used in place of a real one to return a dataset
    that can be used for iteration.
  """
  # pylint: disable=unused-argument
  def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
    """Returns dataset filled with random data."""
    # Synthetic input should be within [0, 255].
    inputs = tf.random.truncated_normal([height, width, num_channels],
                                        dtype=dtype,
                                        mean=127,
                                        stddev=60,
                                        name='synthetic_inputs')
    labels = tf.random.uniform([1],
                               minval=0,
                               maxval=num_classes - 1,
                               dtype=tf.int32,
                               name='synthetic_labels')
    # Cast to float32 for Keras model.
    labels = tf.cast(labels, dtype=tf.float32)

    data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()

    # `drop_remainder` will make dataset produce outputs with known shapes.
    data = data.batch(batch_size, drop_remainder=drop_remainder)
    data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return data

  return input_fn
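
# Illustrative usage sketch (comment only, not executed; the ImageNet-like
# shapes and batch size are hypothetical):
#
#   synth_input_fn = get_synth_input_fn(224, 224, 3, 1000, dtype=tf.float32)
#   dataset = synth_input_fn(is_training=True, data_dir=None, batch_size=256)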


def data_delay_prefetch():
  """Use unstable code for perf tuning purposes."""
  if not FLAGS.use_synthetic_data:
    _monkey_patch_org_create_device_dataset()


def set_cudnn_batchnorm_mode():
  """Set CuDNN batchnorm mode for better performance.

     Note: Spatial Persistent mode may lead to accuracy losses for certain
     models.
  """
  if FLAGS.batchnorm_spatial_persistent:
    os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
  else:
    os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None)


# TODO(haoyuzhang): remove this monkey patch when the "prefetch with slack"
# feature is available in tf.data.
def _monkey_patch_org_create_device_dataset():
  """Monkey-patch `_create_device_dataset` method with delayed prefetch."""

  import ast  # pylint: disable=g-import-not-at-top
  import inspect  # pylint: disable=g-import-not-at-top
  from tensorflow.python.data.ops import multi_device_iterator_ops  # pylint: disable=g-import-not-at-top

  tf.compat.v1.logging.info(
      'Using monkey-patched version of MultiDeviceIterator. It should be '
      'removed when the prefetch with slack feature is implemented in tf.data.')
  cls_multi_device_iterator = ast.parse(
      inspect.getsource(multi_device_iterator_ops.MultiDeviceIterator))
  org_create_device_dataset_code = inspect.getsource(
      multi_device_iterator_ops.MultiDeviceIterator._create_device_dataset)  # pylint: disable=protected-access
  code_lines = org_create_device_dataset_code.split('\n')
  # Insert in reverse order to avoid line number shift by previous insertions
  code_lines.insert(5, '      ds = ds.apply(sleep_ops.sleep(11000))')  # 11ms
  code_lines.insert(2, '    from tensorflow.python.data.experimental.ops import sleep as sleep_ops')  # pylint: disable=line-too-long
  patched_code = '\n'.join(line[2:] for line in code_lines)
  cls_multi_device_iterator.body[0].body[2] = ast.parse(patched_code).body[0]
  exec(compile(cls_multi_device_iterator, '<string>', 'exec'),  # pylint: disable=exec-used
       multi_device_iterator_ops.__dict__)