# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Common util functions and classes used by both keras cifar and imagenet."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

from absl import flags
import numpy as np
import tensorflow as tf

from tensorflow.python.keras.optimizer_v2 import gradient_descent as gradient_descent_v2
from official.utils.flags import core as flags_core
from official.utils.misc import keras_utils

FLAGS = flags.FLAGS
BASE_LEARNING_RATE = 0.1  # This matches Jing's version.
TRAIN_TOP_1 = 'training_accuracy_top_1'
LR_SCHEDULE = [    # (multiplier, epoch to start) tuples
    (1.0, 5), (0.1, 30), (0.01, 60), (0.001, 80)
]


def learning_rate_schedule(current_epoch,
                           current_batch,
                           steps_per_epoch,
                           batch_size):
  """Handles linear scaling rule, gradual warmup, and LR decay.

  Scales the learning rate at the epoch boundaries given in LR_SCHEDULE by the
  corresponding multiplier.

  Args:
    current_epoch: integer, current epoch indexed from 0.
    current_batch: integer, current batch in the current epoch, indexed from 0.
    steps_per_epoch: integer, number of steps in an epoch.
    batch_size: integer, total batch size.

  Returns:
    Adjusted learning rate.
  """
  initial_lr = BASE_LEARNING_RATE * batch_size / 256
  epoch = current_epoch + float(current_batch) / steps_per_epoch
  warmup_lr_multiplier, warmup_end_epoch = LR_SCHEDULE[0]
  if epoch < warmup_end_epoch:
    # Learning rate increases linearly per step.
    return initial_lr * warmup_lr_multiplier * epoch / warmup_end_epoch
  for mult, start_epoch in LR_SCHEDULE:
    if epoch >= start_epoch:
      learning_rate = initial_lr * mult
    else:
      break
  return learning_rate
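
# Example (illustrative, not part of the original module): with batch_size=256
# (so initial_lr == 0.1) and steps_per_epoch=100, the schedule first warms up
# linearly and then decays at the LR_SCHEDULE boundaries:
#   learning_rate_schedule(2, 50, 100, 256)  # 0.1 * (2.5 / 5) = 0.05 (warmup)
#   learning_rate_schedule(35, 0, 100, 256)  # 0.1 * 0.1 = 0.01 (after epoch 30)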


class LearningRateBatchScheduler(tf.keras.callbacks.Callback):
  """Callback to update learning rate on every batch (not epoch boundaries).

  N.B. Only supports Keras optimizers, not TF optimizers.

  Attributes:
      schedule: a function that takes an epoch index and a batch index as input
          (both integer, indexed from 0) and returns a new learning rate as
          output (float).
  """

  def __init__(self, schedule, batch_size, steps_per_epoch):
    super(LearningRateBatchScheduler, self).__init__()
    self.schedule = schedule
    self.steps_per_epoch = steps_per_epoch
    self.batch_size = batch_size
    self.epochs = -1
    self.prev_lr = -1

  def on_epoch_begin(self, epoch, logs=None):
    if not hasattr(self.model.optimizer, 'learning_rate'):
      raise ValueError('Optimizer must have a "learning_rate" attribute.')
    self.epochs += 1

  def on_batch_begin(self, batch, logs=None):
    """Executes before step begins."""
    lr = self.schedule(self.epochs,
                       batch,
                       self.steps_per_epoch,
                       self.batch_size)
    if not isinstance(lr, (float, np.float32, np.float64)):
      raise ValueError('The output of the "schedule" function should be float.')
    if lr != self.prev_lr:
      self.model.optimizer.learning_rate = lr  # lr should be a float here
      self.prev_lr = lr
      tf.compat.v1.logging.debug(
          'Epoch %05d Batch %05d: LearningRateBatchScheduler '
          'change learning rate to %s.', self.epochs, batch, lr)
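
# Usage sketch (illustrative only; `model` and `dataset` are assumed to exist):
#   scheduler = LearningRateBatchScheduler(
#       schedule=learning_rate_schedule, batch_size=256, steps_per_epoch=100)
#   model.fit(dataset, epochs=90, callbacks=[scheduler])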


class PiecewiseConstantDecayWithWarmup(
    tf.keras.optimizers.schedules.LearningRateSchedule):
  """Piecewise constant decay with warmup schedule."""

  def __init__(self, batch_size, epoch_size, warmup_epochs, boundaries,
               multipliers, compute_lr_on_cpu=True, name=None):
    super(PiecewiseConstantDecayWithWarmup, self).__init__()
    if len(boundaries) != len(multipliers) - 1:
      raise ValueError('The length of boundaries must be 1 less than the '
                       'length of multipliers')

    base_lr_batch_size = 256
    steps_per_epoch = epoch_size // batch_size

    self.rescaled_lr = BASE_LEARNING_RATE * batch_size / base_lr_batch_size
    self.step_boundaries = [float(steps_per_epoch) * x for x in boundaries]
    self.lr_values = [self.rescaled_lr * m for m in multipliers]
    self.warmup_steps = warmup_epochs * steps_per_epoch
    self.compute_lr_on_cpu = compute_lr_on_cpu
    self.name = name

    self.learning_rate_ops_cache = {}

  def __call__(self, step):
    if tf.executing_eagerly():
      return self._get_learning_rate(step)

    # In an eager function or graph, the current implementation of the
    # optimizer repeatedly calls and thus creates ops for the learning rate
    # schedule. To avoid this, we cache the ops when not executing eagerly.
    graph = tf.compat.v1.get_default_graph()
    if graph not in self.learning_rate_ops_cache:
      if self.compute_lr_on_cpu:
        with tf.device('/device:CPU:0'):
          self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
      else:
        self.learning_rate_ops_cache[graph] = self._get_learning_rate(step)
    return self.learning_rate_ops_cache[graph]

  def _get_learning_rate(self, step):
    """Compute learning rate at given step."""
    with tf.compat.v1.name_scope(self.name, 'PiecewiseConstantDecayWithWarmup',
                                 [self.rescaled_lr, self.step_boundaries,
                                  self.lr_values, self.warmup_steps,
                                  self.compute_lr_on_cpu]):
      def warmup_lr(step):
        return self.rescaled_lr * (
            tf.cast(step, tf.float32) / tf.cast(self.warmup_steps, tf.float32))
      def piecewise_lr(step):
        return tf.compat.v1.train.piecewise_constant(
            step, self.step_boundaries, self.lr_values)
      return tf.cond(step < self.warmup_steps,
                     lambda: warmup_lr(step),
                     lambda: piecewise_lr(step))

  def get_config(self):
    return {
        'rescaled_lr': self.rescaled_lr,
        'step_boundaries': self.step_boundaries,
        'lr_values': self.lr_values,
        'warmup_steps': self.warmup_steps,
        'compute_lr_on_cpu': self.compute_lr_on_cpu,
        'name': self.name
    }
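
# Usage sketch (illustrative ImageNet-style values, not part of the original
# module; boundaries are given in epochs and converted to steps internally):
#   lr_schedule = PiecewiseConstantDecayWithWarmup(
#       batch_size=256, epoch_size=1281167, warmup_epochs=5,
#       boundaries=[30, 60, 80], multipliers=[1.0, 0.1, 0.01, 0.001])
#   optimizer = tf.keras.optimizers.SGD(learning_rate=lr_schedule, momentum=0.9)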


def get_optimizer(learning_rate=0.1):
  """Returns optimizer to use."""
  # The learning_rate is overwritten at the beginning of each step by a
  # callback.
  return gradient_descent_v2.SGD(learning_rate=learning_rate, momentum=0.9)


# TODO(hongkuny,haoyuzhang): make cifar model use_tensor_lr to clean up code.
def get_callbacks(steps_per_epoch, learning_rate_schedule_fn=None):
  """Returns common callbacks."""
  time_callback = keras_utils.TimeHistory(FLAGS.batch_size, FLAGS.log_steps)
  callbacks = [time_callback]

  if not FLAGS.use_tensor_lr and learning_rate_schedule_fn:
    lr_callback = LearningRateBatchScheduler(
        learning_rate_schedule_fn,
        batch_size=FLAGS.batch_size,
        steps_per_epoch=steps_per_epoch)
    callbacks.append(lr_callback)

  if FLAGS.enable_tensorboard:
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=FLAGS.model_dir)
    callbacks.append(tensorboard_callback)

  if FLAGS.profile_steps:
    profiler_callback = keras_utils.get_profiler_callback(
        FLAGS.model_dir,
        FLAGS.profile_steps,
        FLAGS.enable_tensorboard,
        steps_per_epoch)
    callbacks.append(profiler_callback)

  return callbacks
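
# Usage sketch (illustrative; assumes absl flags have been parsed):
#   callbacks = get_callbacks(steps_per_epoch=100,
#                             learning_rate_schedule_fn=learning_rate_schedule)
#   model.fit(dataset, epochs=90, callbacks=callbacks)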


def build_stats(history, eval_output, callbacks):
  """Normalizes and returns dictionary of stats.

  Args:
    history: Results of the training step. Supports both categorical_accuracy
      and sparse_categorical_accuracy.
    eval_output: Output of the eval step. Assumes first value is eval_loss and
      second value is accuracy_top_1.
    callbacks: a list of callbacks which might include a time history callback
      used during keras.fit.

  Returns:
    Dictionary of normalized results.
  """
  stats = {}
  if eval_output:
    stats['accuracy_top_1'] = eval_output[1].item()
    stats['eval_loss'] = eval_output[0].item()

  if history and history.history:
    train_hist = history.history
    # Gets final loss from training.
    stats['loss'] = train_hist['loss'][-1].item()
    # Gets top_1 training accuracy.
    if 'categorical_accuracy' in train_hist:
      stats[TRAIN_TOP_1] = train_hist['categorical_accuracy'][-1].item()
    elif 'sparse_categorical_accuracy' in train_hist:
      stats[TRAIN_TOP_1] = train_hist['sparse_categorical_accuracy'][-1].item()

  if not callbacks:
    return stats

  # Look for the time history callback which was used during keras.fit
  for callback in callbacks:
    if isinstance(callback, keras_utils.TimeHistory):
      timestamp_log = callback.timestamp_log
      stats['step_timestamp_log'] = timestamp_log
      stats['train_finish_time'] = callback.train_finish_time
      if len(timestamp_log) > 1:
        stats['avg_exp_per_second'] = (
            callback.batch_size * callback.log_steps *
            (len(callback.timestamp_log)-1) /
            (timestamp_log[-1].timestamp - timestamp_log[0].timestamp))
  return stats
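
# The returned dict may look like the following (illustrative values only):
#   {'accuracy_top_1': 0.76, 'eval_loss': 0.94, 'loss': 0.85,
#    'training_accuracy_top_1': 0.78, 'step_timestamp_log': [...],
#    'train_finish_time': 1572000000.0, 'avg_exp_per_second': 4500.0}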


def define_keras_flags(dynamic_loss_scale=True):
  """Define flags for Keras models."""
  flags_core.define_base(clean=True, num_gpu=True, run_eagerly=True,
                         train_epochs=True, epochs_between_evals=True,
                         distribution_strategy=True)
  flags_core.define_performance(num_parallel_calls=False,
                                synthetic_data=True,
                                dtype=True,
                                all_reduce_alg=True,
                                num_packs=True,
                                tf_gpu_thread_mode=True,
                                datasets_num_private_threads=True,
                                dynamic_loss_scale=dynamic_loss_scale,
                                loss_scale=True,
                                fp16_implementation=True,
                                tf_data_experimental_slack=True,
                                enable_xla=True,
                                force_v2_in_keras_compile=True,
                                training_dataset_cache=True)
  flags_core.define_image()
  flags_core.define_benchmark()
  flags_core.define_distribution()
  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_boolean(name='enable_eager', default=False, help='Enable eager?')
  flags.DEFINE_boolean(name='skip_eval', default=False, help='Skip evaluation?')
  # TODO(b/135607288): Remove this flag once we understand the root cause of
  # slowdown when setting the learning phase in Keras backend.
  flags.DEFINE_boolean(
      name='set_learning_phase_to_train', default=True,
      help='If skipping eval, also set the Keras learning phase to 1 '
      '(training).')
  flags.DEFINE_boolean(
      name='explicit_gpu_placement', default=False,
      help='If not using distribution strategy, explicitly set device scope '
      'for the Keras training loop.')
  flags.DEFINE_boolean(name='use_trivial_model', default=False,
                       help='Whether to use a trivial Keras model.')
  flags.DEFINE_boolean(name='report_accuracy_metrics', default=True,
                       help='Report metrics during training and evaluation.')
  flags.DEFINE_boolean(name='use_tensor_lr', default=False,
                       help='Use learning rate tensor instead of a callback.')
  flags.DEFINE_boolean(
      name='enable_tensorboard', default=False,
      help='Whether to enable Tensorboard callback.')
  flags.DEFINE_integer(
      name='train_steps', default=None,
      help='The number of steps to run for training. If it is larger than '
      '# batches per epoch, then use # batches per epoch. This flag will be '
      'ignored if train_epochs is set larger than 1.')
  flags.DEFINE_string(
      name='profile_steps', default=None,
      help='Save profiling data to model dir for a given range of global steps. The '
      'value must be a comma separated pair of positive integers, specifying '
      'the first and last step to profile. For example, "--profile_steps=2,4" '
      'triggers the profiler to process 3 steps, starting from the 2nd step. '
      'Note that profiler has a non-trivial performance overhead, and the '
      'output file can be gigantic if profiling many steps.')
  flags.DEFINE_boolean(
      name='batchnorm_spatial_persistent', default=True,
      help='Enable the spatial persistent mode for CuDNN batch norm kernel.')
  flags.DEFINE_boolean(
      name='enable_get_next_as_optional', default=False,
      help='Enable get_next_as_optional behavior in DistributedIterator.')
  flags.DEFINE_boolean(
      name='enable_checkpoint_and_export', default=False,
      help='Whether to enable a checkpoint callback and export the savedmodel.')
  flags.DEFINE_string(
      name='tpu', default='', help='TPU address to connect to.')
  flags.DEFINE_integer(
      name='steps_per_loop', default=1,
      help='Number of steps per graph-mode loop. Only the training step '
      'happens inside the loop, and callbacks will not be called inside it. '
      'Will be capped at steps per epoch.')


def get_synth_data(height, width, num_channels, num_classes, dtype):
  """Creates a set of synthetic random data.

  Args:
    height: Integer height that will be used to create a fake image tensor.
    width: Integer width that will be used to create a fake image tensor.
    num_channels: Integer depth that will be used to create a fake image tensor.
    num_classes: Number of classes that should be represented in the fake labels
      tensor.
    dtype: Data type for features/images.

  Returns:
    A tuple of tensors representing the inputs and labels.

  """
  # Synthetic input should be within [0, 255].
  inputs = tf.random.truncated_normal([height, width, num_channels],
                                      dtype=dtype,
                                      mean=127,
                                      stddev=60,
                                      name='synthetic_inputs')
  labels = tf.random.uniform([1],
                             minval=0,
                             maxval=num_classes,  # maxval is exclusive.
                             dtype=tf.int32,
                             name='synthetic_labels')
  return inputs, labels


def get_synth_input_fn(height, width, num_channels, num_classes,
                       dtype=tf.float32, drop_remainder=True):
  """Returns an input function that returns a dataset with random data.

  This input_fn returns a data set that iterates over a set of random data and
  bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
  copy is still included. This used to find the upper throughput bound when
Shining Sun's avatar
Shining Sun committed
368
  tuning the full input pipeline.

  Args:
    height: Integer height that will be used to create a fake image tensor.
    width: Integer width that will be used to create a fake image tensor.
    num_channels: Integer depth that will be used to create a fake image tensor.
    num_classes: Number of classes that should be represented in the fake labels
      tensor.
    dtype: Data type for features/images.
    drop_remainder: A boolean indicating whether to drop the remainder of the
      batches. If True, the batch dimension will be static.

  Returns:
    An input_fn that can be used in place of a real one to return a dataset
    that can be used for iteration.
  """
  # pylint: disable=unused-argument
  def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
    """Returns dataset filled with random data."""
    inputs, labels = get_synth_data(height=height,
                                    width=width,
                                    num_channels=num_channels,
                                    num_classes=num_classes,
                                    dtype=dtype)
    # Cast to float32 for Keras model.
    labels = tf.cast(labels, dtype=tf.float32)
    data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()

    # `drop_remainder` will make dataset produce outputs with known shapes.
    data = data.batch(batch_size, drop_remainder=drop_remainder)
    data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return data

  return input_fn
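
# Usage sketch (illustrative ImageNet-style shapes):
#   input_fn = get_synth_input_fn(height=224, width=224, num_channels=3,
#                                 num_classes=1000, dtype=tf.float32)
#   dataset = input_fn(is_training=True, data_dir=None, batch_size=32)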


def set_cudnn_batchnorm_mode():
  """Set CuDNN batchnorm mode for better performance.

     Note: Spatial Persistent mode may lead to accuracy losses for certain
     models.
  """
  if FLAGS.batchnorm_spatial_persistent:
    os.environ['TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT'] = '1'
  else:
    os.environ.pop('TF_USE_CUDNN_BATCHNORM_SPATIAL_PERSISTENT', None)