"vscode:/vscode.git/clone" did not exist on "ddb0c2301a316855e060b753659bc191cd26b34a"
resnet_run_loop.py 33.7 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains utility and supporting functions for ResNet.

This module contains ResNet code which does not directly build layers. This
includes dataset management, hyperparameter and optimizer code, and argument
parsing. Code for defining the ResNet layers can be found in resnet_model.py.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
import math
import multiprocessing
import os

from absl import flags
import tensorflow as tf

from official.r1.resnet import imagenet_preprocessing
from official.r1.resnet import resnet_model
from official.r1.utils import export
from official.utils.flags import core as flags_core
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers


################################################################################
# Functions for input processing.
################################################################################
def process_record_dataset(dataset,
                           is_training,
                           batch_size,
                           shuffle_buffer,
                           parse_record_fn,
                           num_epochs=1,
                           dtype=tf.float32,
                           datasets_num_private_threads=None,
                           drop_remainder=False,
                           tf_data_experimental_slack=False):
  """Given a Dataset with raw records, return an iterator over the records.

  Args:
    dataset: A Dataset representing raw records
    is_training: A boolean denoting whether the input is for training.
    batch_size: The number of samples per batch.
    shuffle_buffer: The buffer size to use when shuffling records. A larger
      value results in better randomness, but smaller values reduce startup
      time and use less memory.
    parse_record_fn: A function that takes a raw record and returns the
      corresponding (image, label) pair.
    num_epochs: The number of epochs to repeat the dataset.
    dtype: Data type to use for images/features.
    datasets_num_private_threads: Number of threads for a private
      threadpool created for all datasets computation.
    drop_remainder: A boolean indicating whether to drop the remainder of the
      batches. If True, the batch dimension will be static.
    tf_data_experimental_slack: Whether to enable tf.data's
      `experimental_slack` option.

  Returns:
    Dataset of (image, label) pairs ready for iteration.
  """
  # Defines a specific size thread pool for tf.data operations.
  if datasets_num_private_threads:
    options = tf.data.Options()
    options.experimental_threading.private_threadpool_size = (
        datasets_num_private_threads)
    dataset = dataset.with_options(options)
    tf.compat.v1.logging.info('datasets_num_private_threads: %s',
                              datasets_num_private_threads)

  # Disable intra-op parallelism to optimize for throughput instead of latency.
  options = tf.data.Options()
  options.experimental_threading.max_intra_op_parallelism = 1
  dataset = dataset.with_options(options)

  # Prefetches a batch at a time to smooth out the time taken to load input
  # files for shuffling and processing.
  dataset = dataset.prefetch(buffer_size=batch_size)
  if is_training:
    # Shuffles records before repeating to respect epoch boundaries.
    dataset = dataset.shuffle(buffer_size=shuffle_buffer)

  # Repeats the dataset for the number of epochs to train.
  dataset = dataset.repeat(num_epochs)

  # Parses the raw records into images and labels.
  dataset = dataset.map(
      lambda value: parse_record_fn(value, is_training, dtype),
      num_parallel_calls=tf.data.experimental.AUTOTUNE)
  dataset = dataset.batch(batch_size, drop_remainder=drop_remainder)

  # Operations between the final prefetch and the get_next call to the iterator
  # will happen synchronously during run time. We prefetch here again to
  # background all of the above processing work and keep it out of the
  # critical training path. Setting buffer_size to
  # tf.data.experimental.AUTOTUNE allows DistributionStrategies to adjust how
  # many batches to fetch based on how many devices are present.
  dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

  if tf_data_experimental_slack:
    options = tf.data.Options()
    options.experimental_slack = True
    dataset = dataset.with_options(options)

  return dataset
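
# A minimal usage sketch (hypothetical; `filenames` and `parse_record` are
# illustrative names, and `parse_record` must accept the
# (value, is_training, dtype) signature used by the map call above):
#
#   raw_dataset = tf.data.TFRecordDataset(filenames)
#   dataset = process_record_dataset(
#       raw_dataset, is_training=True, batch_size=128, shuffle_buffer=10000,
#       parse_record_fn=parse_record, num_epochs=10)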


def get_synth_input_fn(height, width, num_channels, num_classes,
                       dtype=tf.float32):
  """Returns an input function that returns a dataset with random data.

  This input_fn returns a dataset that iterates over a set of random data and
  bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
  copy is still included. This is used to find the upper throughput bound when
  tuning the full input pipeline.

  Args:
    height: Integer height that will be used to create a fake image tensor.
    width: Integer width that will be used to create a fake image tensor.
    num_channels: Integer depth that will be used to create a fake image tensor.
    num_classes: Number of classes that should be represented in the fake
      labels tensor.
    dtype: Data type for features/images.

  Returns:
    An input_fn that can be used in place of a real one to return a dataset
    that can be used for iteration.
  """
  # pylint: disable=unused-argument
  def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
    """Returns dataset filled with random data."""
    # Synthetic input should be within [0, 255].
    inputs = tf.random.truncated_normal(
        [batch_size] + [height, width, num_channels],
        dtype=dtype,
        mean=127,
        stddev=60,
        name='synthetic_inputs')

    labels = tf.random.uniform(
        [batch_size],
        minval=0,
        maxval=num_classes - 1,
        dtype=tf.int32,
        name='synthetic_labels')
    data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
    data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return data

  return input_fn
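
# For illustration (hypothetical, roughly ImageNet-shaped values), the
# returned input_fn ignores data_dir and yields an endless synthetic dataset:
#
#   input_fn = get_synth_input_fn(224, 224, 3, 1001, dtype=tf.float32)
#   dataset = input_fn(is_training=True, data_dir=None, batch_size=32)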


def image_bytes_serving_input_fn(image_shape, dtype=tf.float32):
  """Serving input fn for raw jpeg images."""

  def _preprocess_image(image_bytes):
    """Preprocess a single raw image."""
    # Bounding box around the whole image.
    bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=dtype, shape=[1, 1, 4])
    height, width, num_channels = image_shape
    image = imagenet_preprocessing.preprocess_image(
        image_bytes, bbox, height, width, num_channels, is_training=False)
    return image

  image_bytes_list = tf.compat.v1.placeholder(
      shape=[None], dtype=tf.string, name='input_tensor')
  images = tf.map_fn(
      _preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
  return tf.estimator.export.TensorServingInputReceiver(
      images, {'image_bytes': image_bytes_list})
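
# As done in resnet_main below, this serving input fn is typically bound to a
# concrete image shape with functools.partial before being passed to
# Estimator.export_savedmodel.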


def override_flags_and_set_envars_for_gpu_thread_pool(flags_obj):
  """Override flags and set env_vars for performance.

  These settings exist to test the difference between using stock settings
  and manual tuning. They also show some of the ENV_VARS that can be tweaked
  to squeeze a few extra examples per second. These settings are defaulted to
  the current platform of interest, which changes over time.

  On systems with a small number of cpu cores, e.g. under 8 logical cores,
  setting up a gpu thread pool with `tf_gpu_thread_mode=gpu_private` may
  perform poorly.

  Args:
    flags_obj: Current flags, which will be adjusted, possibly overriding
      what has been set by the user on the command line.
  """
  cpu_count = multiprocessing.cpu_count()
  tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count)

  # Sets up thread pool for each GPU for op scheduling.
  per_gpu_thread_count = 1
  total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus
  os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode
  os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
  tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s',
                            os.environ['TF_GPU_THREAD_COUNT'])
  tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s',
                            os.environ['TF_GPU_THREAD_MODE'])

  # Reduces general thread pool by number of threads used for GPU pool.
  main_thread_count = cpu_count - total_gpu_thread_count
  flags_obj.inter_op_parallelism_threads = main_thread_count

  # Sets thread count for tf.data. Logical cores minus threads assigned to
  # the private GPU pool, along with 2 threads per GPU for event monitoring
  # and sending / receiving tensors.
  num_monitoring_threads = 2 * flags_obj.num_gpus
  flags_obj.datasets_num_private_threads = (cpu_count - total_gpu_thread_count
                                            - num_monitoring_threads)
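
# Worked example (hypothetical host): with cpu_count = 96 and
# flags_obj.num_gpus = 8, the code above sets total_gpu_thread_count = 8,
# inter_op_parallelism_threads = 96 - 8 = 88, and
# datasets_num_private_threads = 96 - 8 - (2 * 8) = 72.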


################################################################################
# Functions for running training/eval/validation loops for the model.
################################################################################
def learning_rate_with_decay(
    batch_size, batch_denom, num_images, boundary_epochs, decay_rates,
    base_lr=0.1, warmup=False):
  """Get a learning rate that decays step-wise as training progresses.

  Args:
    batch_size: the number of examples processed in each training batch.
    batch_denom: this value will be used to scale the base learning rate.
      `0.1 * batch size` is divided by this number, such that when
      batch_denom == batch_size, the initial learning rate will be 0.1.
    num_images: total number of images that will be used for training.
    boundary_epochs: list of ints representing the epochs at which we
      decay the learning rate.
    decay_rates: list of floats representing the decay rates to be used
      for scaling the learning rate. It should have one more element
      than `boundary_epochs`, and all elements should have the same type.
    base_lr: Initial learning rate scaled based on batch_denom.
    warmup: Run a 5 epoch warmup to the initial lr.

  Returns:
    A function that takes a single argument - the number of batches trained
    so far (global_step) - and returns the learning rate to be used for
    training the next batch.
  """
  initial_learning_rate = base_lr * batch_size / batch_denom
  batches_per_epoch = num_images / batch_size

  # Reduce the learning rate at certain epochs.
  # CIFAR-10: divide by 10 at epoch 100, 150, and 200
  # ImageNet: divide by 10 at epoch 30, 60, 80, and 90
  boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
  vals = [initial_learning_rate * decay for decay in decay_rates]
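
  # For example, with batch_size = batch_denom = 256 and base_lr = 0.1 the
  # initial rate is 0.1; if num_images were 1,280,000 (an illustrative,
  # rounded value), batches_per_epoch would be 5000 and boundary_epochs of
  # [30, 60, 80, 90] would map to steps [150000, 300000, 400000, 450000].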

  def learning_rate_fn(global_step):
    """Builds scaled learning rate function with 5 epoch warm up."""
    lr = tf.compat.v1.train.piecewise_constant(global_step, boundaries, vals)
    if warmup:
      warmup_steps = int(batches_per_epoch * 5)
      warmup_lr = (
          initial_learning_rate * tf.cast(global_step, tf.float32) / tf.cast(
              warmup_steps, tf.float32))
      return tf.cond(pred=global_step < warmup_steps,
                     true_fn=lambda: warmup_lr,
                     false_fn=lambda: lr)
    return lr

  def poly_rate_fn(global_step):
    """Handles linear scaling rule, gradual warmup, and LR decay.

    The learning rate starts at 0, then it increases linearly per step.  After
    FLAGS.poly_warmup_epochs, we reach the base learning rate (scaled to account
    for batch size). The learning rate is then decayed using a polynomial rate
    decay schedule with power 2.0.

    Args:
      global_step: the current global_step

    Returns:
      The current learning rate.
    """

    # Learning rate schedule for LARS polynomial schedule
    if flags.FLAGS.batch_size < 8192:
      plr = 5.0
      w_epochs = 5
    elif flags.FLAGS.batch_size < 16384:
      plr = 10.0
      w_epochs = 5
    elif flags.FLAGS.batch_size < 32768:
      plr = 25.0
      w_epochs = 5
    else:
      plr = 32.0
      w_epochs = 14

    w_steps = int(w_epochs * batches_per_epoch)
    wrate = (plr * tf.cast(global_step, tf.float32) / tf.cast(
        w_steps, tf.float32))

    # TODO(pkanwar): use a flag to help calc num_epochs.
    num_epochs = 90
    train_steps = batches_per_epoch * num_epochs

    min_step = tf.constant(1, dtype=tf.int64)
    decay_steps = tf.maximum(min_step, tf.subtract(global_step, w_steps))
    poly_rate = tf.train.polynomial_decay(
        plr,
        decay_steps,
        train_steps - w_steps + 1,
        power=2.0)
    return tf.where(global_step <= w_steps, wrate, poly_rate)

  # For LARS we have a new learning rate schedule
  if flags.FLAGS.enable_lars:
    return poly_rate_fn

  return learning_rate_fn
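
# A usage sketch with CIFAR-10-style values (illustrative; drawn from the
# decay comment above rather than from any particular caller):
#
#   lr_fn = learning_rate_with_decay(
#       batch_size=128, batch_denom=128, num_images=50000,
#       boundary_epochs=[100, 150, 200], decay_rates=[1, 0.1, 0.01, 0.001])
#   learning_rate = lr_fn(global_step)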


def resnet_model_fn(features, labels, mode, model_class,
                    resnet_size, weight_decay, learning_rate_fn, momentum,
                    data_format, resnet_version, loss_scale,
                    loss_filter_fn=None, dtype=resnet_model.DEFAULT_DTYPE,
                    fine_tune=False, label_smoothing=0.0):
  """Shared functionality for different resnet model_fns.

  Initializes the ResnetModel representing the model layers
  and uses that model to build the necessary EstimatorSpecs for
  the `mode` in question. For training, this means building losses,
  the optimizer, and the train op that get passed into the EstimatorSpec.
  For evaluation and prediction, the EstimatorSpec is returned without
  a train op, but with the necessary parameters for the given mode.

  Args:
    features: tensor representing input images
    labels: tensor representing class labels for all input images
    mode: current estimator mode; should be one of
      `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
    model_class: a class representing a TensorFlow model that has a __call__
      function. We assume here that this is a subclass of ResnetModel.
    resnet_size: A single integer for the size of the ResNet model.
    weight_decay: weight decay loss rate used to regularize learned variables.
    learning_rate_fn: function that returns the current learning rate given
      the current global_step
    momentum: momentum term used for optimization
    data_format: Input format ('channels_last', 'channels_first', or None).
      If set to None, the format is dependent on whether a GPU is available.
    resnet_version: Integer representing which version of the ResNet network to
      use. See README for details. Valid values: [1, 2]
    loss_scale: The factor to scale the loss for numerical stability. A detailed
      summary is present in the arg parser help text.
    loss_filter_fn: function that takes a string variable name and returns
      True if the var should be included in loss calculation, and False
      otherwise. If None, batch_normalization variables will be excluded
      from the loss.
    dtype: the TensorFlow dtype to use for calculations.
    fine_tune: If True, only train the dense layers (final layers).
    label_smoothing: If greater than 0, smooth the labels.

  Returns:
    EstimatorSpec parameterized according to the input params and the
    current mode.
  """

  # Generate a summary node for the images
  tf.compat.v1.summary.image('images', features, max_outputs=6)
  # Checks that features/images have the same data type as is used for
  # calculations.
  assert features.dtype == dtype

  model = model_class(resnet_size, data_format, resnet_version=resnet_version,
                      dtype=dtype)

  logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)

  # This acts as a no-op if the logits are already in fp32 (provided logits are
  # not a SparseTensor). If dtype is low precision, logits must be cast to
  # fp32 for numerical stability.
  logits = tf.cast(logits, tf.float32)

  predictions = {
      'classes': tf.argmax(input=logits, axis=1),
      'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
  }

  if mode == tf.estimator.ModeKeys.PREDICT:
    # Return the predictions and the specification for serving a SavedModel
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        export_outputs={
            'predict': tf.estimator.export.PredictOutput(predictions)
        })

  # Calculate loss, which includes softmax cross entropy and L2 regularization.
  if label_smoothing != 0.0:
    one_hot_labels = tf.one_hot(labels, 1001)
    cross_entropy = tf.losses.softmax_cross_entropy(
        logits=logits, onehot_labels=one_hot_labels,
        label_smoothing=label_smoothing)
  else:
    cross_entropy = tf.compat.v1.losses.sparse_softmax_cross_entropy(
        logits=logits, labels=labels)

  # Create a tensor named cross_entropy for logging purposes.
  tf.identity(cross_entropy, name='cross_entropy')
  tf.compat.v1.summary.scalar('cross_entropy', cross_entropy)

  # If no loss_filter_fn is passed, assume we want the default behavior,
  # which is that batch_normalization variables are excluded from loss.
  def exclude_batch_norm(name):
    return 'batch_normalization' not in name
  loss_filter_fn = loss_filter_fn or exclude_batch_norm

  # Add weight decay to the loss.
  l2_loss = weight_decay * tf.add_n(
      # loss is computed using fp32 for numerical stability.
      [
          tf.nn.l2_loss(tf.cast(v, tf.float32))
          for v in tf.compat.v1.trainable_variables()
          if loss_filter_fn(v.name)
      ])
  tf.compat.v1.summary.scalar('l2_loss', l2_loss)
  loss = cross_entropy + l2_loss

  if mode == tf.estimator.ModeKeys.TRAIN:
    global_step = tf.compat.v1.train.get_or_create_global_step()

    learning_rate = learning_rate_fn(global_step)

    # Create a tensor named learning_rate for logging purposes
    tf.identity(learning_rate, name='learning_rate')
    tf.compat.v1.summary.scalar('learning_rate', learning_rate)

    if flags.FLAGS.enable_lars:
      optimizer = tf.contrib.opt.LARSOptimizer(
          learning_rate,
          momentum=momentum,
          weight_decay=weight_decay,
          skip_list=['batch_normalization', 'bias'])
    else:
      optimizer = tf.compat.v1.train.MomentumOptimizer(
          learning_rate=learning_rate,
          momentum=momentum
      )

    fp16_implementation = getattr(flags.FLAGS, 'fp16_implementation', None)
    if fp16_implementation == 'graph_rewrite':
      optimizer = (
          tf.compat.v1.train.experimental.enable_mixed_precision_graph_rewrite(
              optimizer, loss_scale=loss_scale))

    def _dense_grad_filter(gvs):
      """Only apply gradient updates to the final layer.

      This function is used for fine tuning.

      Args:
        gvs: list of tuples with gradients and variable info
      Returns:
        filtered gradients so that only the dense layer remains
      """
      return [(g, v) for g, v in gvs if 'dense' in v.name]

    if loss_scale != 1 and fp16_implementation != 'graph_rewrite':
      # When computing fp16 gradients, often intermediate tensor values are
      # so small, they underflow to 0. To avoid this, we multiply the loss by
      # loss_scale to make these tensor values loss_scale times bigger.
      scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)

      if fine_tune:
        scaled_grad_vars = _dense_grad_filter(scaled_grad_vars)

      # Once the gradient computation is complete we can scale the gradients
      # back to the correct scale before passing them to the optimizer.
      unscaled_grad_vars = [(grad / loss_scale, var)
                            for grad, var in scaled_grad_vars]
      minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step)
    else:
      grad_vars = optimizer.compute_gradients(loss)
      if fine_tune:
        grad_vars = _dense_grad_filter(grad_vars)
      minimize_op = optimizer.apply_gradients(grad_vars, global_step)

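    # Batch normalization requires the update ops in the
    # tf.GraphKeys.UPDATE_OPS collection to run with the train op so that
    # moving mean/variance statistics are refreshed each step.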
    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    train_op = tf.group(minimize_op, update_ops)
  else:
    train_op = None

  accuracy = tf.compat.v1.metrics.accuracy(labels, predictions['classes'])
  accuracy_top_5 = tf.compat.v1.metrics.mean(
      tf.nn.in_top_k(predictions=logits, targets=labels, k=5, name='top_5_op'))
  metrics = {'accuracy': accuracy,
             'accuracy_top_5': accuracy_top_5}

  # Create a tensor named train_accuracy for logging purposes
  tf.identity(accuracy[1], name='train_accuracy')
  tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')
  tf.compat.v1.summary.scalar('train_accuracy', accuracy[1])
  tf.compat.v1.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])

  return tf.estimator.EstimatorSpec(
      mode=mode,
      predictions=predictions,
      loss=loss,
      train_op=train_op,
      eval_metric_ops=metrics)


def resnet_main(
    flags_obj, model_function, input_function, dataset_name, shape=None):
  """Shared main loop for ResNet Models.

  Args:
    flags_obj: An object containing parsed flags. See define_resnet_flags()
      for details.
    model_function: the function that instantiates the Model and builds the
      ops for train/eval. This will be passed directly into the estimator.
    input_function: the function that processes the dataset and returns a
      dataset that the estimator can train on. This will be wrapped with
      all the relevant flags for running and passed to estimator.
    dataset_name: the name of the dataset for training and evaluation. This is
      used for logging purposes.
    shape: list of ints representing the shape of the images used for training.
      This is only used if flags_obj.export_dir is passed.

  Returns:
    Dict of results of the run. Contains the keys `eval_results` and
    `train_hooks`. `eval_results` contains accuracy (top_1) and accuracy_top_5.
    `train_hooks` is a list of the instances of hooks used during training.
  """

  model_helpers.apply_clean(flags.FLAGS)

  # Ensures flag override logic is only executed if explicitly triggered.
  if flags_obj.tf_gpu_thread_mode:
    override_flags_and_set_envars_for_gpu_thread_pool(flags_obj)

  # Configures cluster spec for distribution strategy.
  num_workers = distribution_utils.configure_cluster(flags_obj.worker_hosts,
                                                     flags_obj.task_index)

  # Creates session config. allow_soft_placement = True is required for
  # multi-GPU and is not harmful for other modes.
  session_config = tf.compat.v1.ConfigProto(
      inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
      intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
      allow_soft_placement=True)

  distribution_strategy = distribution_utils.get_distribution_strategy(
      distribution_strategy=flags_obj.distribution_strategy,
      num_gpus=flags_core.get_num_gpus(flags_obj),
      num_workers=num_workers,
      all_reduce_alg=flags_obj.all_reduce_alg,
      num_packs=flags_obj.num_packs)

  # Creates a `RunConfig` that checkpoints every 24 hours, which essentially
  # results in checkpoints determined only by `epochs_between_evals`.
  run_config = tf.estimator.RunConfig(
      train_distribute=distribution_strategy,
      session_config=session_config,
      save_checkpoints_secs=60*60*24,
      save_checkpoints_steps=None)

  # Initializes model with all but the dense layer from pretrained ResNet.
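  # The regex '^(?!.*dense)' below warm-starts every variable whose name does
  # not contain "dense", i.e. everything except the final fully connected
  # layer.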
  if flags_obj.pretrained_model_checkpoint_path is not None:
    warm_start_settings = tf.estimator.WarmStartSettings(
        flags_obj.pretrained_model_checkpoint_path,
        vars_to_warm_start='^(?!.*dense)')
  else:
    warm_start_settings = None

  classifier = tf.estimator.Estimator(
      model_fn=model_function, model_dir=flags_obj.model_dir, config=run_config,
      warm_start_from=warm_start_settings, params={
          'resnet_size': int(flags_obj.resnet_size),
          'data_format': flags_obj.data_format,
          'batch_size': flags_obj.batch_size,
          'resnet_version': int(flags_obj.resnet_version),
          'loss_scale': flags_core.get_loss_scale(flags_obj,
                                                  default_for_fp16=128),
          'dtype': flags_core.get_tf_dtype(flags_obj),
          'fine_tune': flags_obj.fine_tune,
          'num_workers': num_workers,
      })

  run_params = {
      'batch_size': flags_obj.batch_size,
      'dtype': flags_core.get_tf_dtype(flags_obj),
      'resnet_size': flags_obj.resnet_size,
      'resnet_version': flags_obj.resnet_version,
      'synthetic_data': flags_obj.use_synthetic_data,
      'train_epochs': flags_obj.train_epochs,
      'num_workers': num_workers,
  }
  if flags_obj.use_synthetic_data:
    dataset_name = dataset_name + '-synthetic'

  benchmark_logger = logger.get_benchmark_logger()
  benchmark_logger.log_run_info('resnet', dataset_name, run_params,
                                test_id=flags_obj.benchmark_test_id)

  train_hooks = hooks_helper.get_train_hooks(
      flags_obj.hooks,
      model_dir=flags_obj.model_dir,
      batch_size=flags_obj.batch_size)

  def input_fn_train(num_epochs, input_context=None):
    return input_function(
        is_training=True,
        data_dir=flags_obj.data_dir,
        batch_size=distribution_utils.per_replica_batch_size(
            flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
        num_epochs=num_epochs,
        dtype=flags_core.get_tf_dtype(flags_obj),
        datasets_num_private_threads=flags_obj.datasets_num_private_threads,
        input_context=input_context)

  def input_fn_eval():
    return input_function(
        is_training=False,
        data_dir=flags_obj.data_dir,
        batch_size=distribution_utils.per_replica_batch_size(
            flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
        num_epochs=1,
        dtype=flags_core.get_tf_dtype(flags_obj))

  train_epochs = (0 if flags_obj.eval_only or not flags_obj.train_epochs else
                  flags_obj.train_epochs)

  use_train_and_evaluate = flags_obj.use_train_and_evaluate or num_workers > 1
  if use_train_and_evaluate:
    train_spec = tf.estimator.TrainSpec(
        input_fn=lambda input_context=None: input_fn_train(
            train_epochs, input_context=input_context),
        hooks=train_hooks,
        max_steps=flags_obj.max_train_steps)
    eval_spec = tf.estimator.EvalSpec(input_fn=input_fn_eval)
    tf.compat.v1.logging.info('Starting to train and evaluate.')
    tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
    # tf.estimator.train_and_evaluate doesn't return anything in the
    # multi-worker case.
    eval_results = {}
  else:
    if train_epochs == 0:
      # If --eval_only is set, perform a single loop with zero train epochs.
      schedule, n_loops = [0], 1
    else:
      # Compute the number of times to loop while training. All but the last
      # pass will train for `epochs_between_evals` epochs, while the last will
      # train for the number needed to reach `train_epochs`. For instance if
      #   train_epochs = 25 and epochs_between_evals = 10
      # schedule will be set to [10, 10, 5]. That is to say, the loop will:
      #   Train for 10 epochs and then evaluate.
      #   Train for another 10 epochs and then evaluate.
      #   Train for a final 5 epochs (to reach 25 epochs) and then evaluate.
      n_loops = math.ceil(train_epochs / flags_obj.epochs_between_evals)
      schedule = [flags_obj.epochs_between_evals for _ in range(int(n_loops))]
      schedule[-1] = train_epochs - sum(schedule[:-1])  # avoid over counting.

    for cycle_index, num_train_epochs in enumerate(schedule):
      tf.compat.v1.logging.info('Starting cycle: %d/%d', cycle_index,
                                int(n_loops))

      if num_train_epochs:
        # Since we are calling classifier.train immediately in each loop, the
        # value of num_train_epochs in the lambda function will not be changed
        # before it is used. So it is safe to ignore the pylint error here
        # pylint: disable=cell-var-from-loop
        classifier.train(
            input_fn=lambda input_context=None: input_fn_train(
                num_train_epochs, input_context=input_context),
            hooks=train_hooks,
            max_steps=flags_obj.max_train_steps)

      # flags_obj.max_train_steps is generally associated with testing and
      # profiling. As a result it is frequently called with synthetic data,
      # which will iterate forever. Passing steps=flags_obj.max_train_steps
      # allows the eval (which is generally unimportant in those circumstances)
      # to terminate.  Note that eval will run for max_train_steps each loop,
      # regardless of the global_step count.
      tf.compat.v1.logging.info('Starting to evaluate.')
      eval_results = classifier.evaluate(input_fn=input_fn_eval,
                                         steps=flags_obj.max_train_steps)

      benchmark_logger.log_evaluation_result(eval_results)

      if model_helpers.past_stop_threshold(
          flags_obj.stop_threshold, eval_results['accuracy']):
        break

  if flags_obj.export_dir is not None:
    # Exports a saved model for the given classifier.
    export_dtype = flags_core.get_tf_dtype(flags_obj)
    if flags_obj.image_bytes_as_serving_input:
      input_receiver_fn = functools.partial(
          image_bytes_serving_input_fn, shape, dtype=export_dtype)
    else:
      input_receiver_fn = export.build_tensor_serving_input_receiver_fn(
          shape, batch_size=flags_obj.batch_size, dtype=export_dtype)
    classifier.export_savedmodel(flags_obj.export_dir, input_receiver_fn,
                                 strip_default_attrs=True)

  stats = {}
  stats['eval_results'] = eval_results
  stats['train_hooks'] = train_hooks

  return stats


def define_resnet_flags(resnet_size_choices=None, dynamic_loss_scale=False,
                        fp16_implementation=False):
  """Add flags and validators for ResNet."""
  flags_core.define_base(clean=True, train_epochs=True,
                         epochs_between_evals=True, stop_threshold=True,
                         num_gpu=True, hooks=True, export_dir=True,
                         distribution_strategy=True)
  flags_core.define_performance(num_parallel_calls=False,
                                inter_op=True,
                                intra_op=True,
                                synthetic_data=True,
                                dtype=True,
                                all_reduce_alg=True,
                                num_packs=True,
                                tf_gpu_thread_mode=True,
                                datasets_num_private_threads=True,
                                dynamic_loss_scale=dynamic_loss_scale,
                                fp16_implementation=fp16_implementation,
                                loss_scale=True,
                                tf_data_experimental_slack=True,
                                max_train_steps=True)
  flags_core.define_image()
  flags_core.define_benchmark()
  flags_core.define_distribution()
  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_enum(
      name='resnet_version', short_name='rv', default='1',
      enum_values=['1', '2'],
      help=flags_core.help_wrap(
          'Version of ResNet. (1 or 2) See README.md for details.'))
  flags.DEFINE_bool(
      name='fine_tune', short_name='ft', default=False,
      help=flags_core.help_wrap(
          'If True, do not train any parameters except for the final layer.'))
  flags.DEFINE_string(
      name='pretrained_model_checkpoint_path', short_name='pmcp', default=None,
      help=flags_core.help_wrap(
          'If not None, initialize all of the network except the final layer '
          'with these values.'))
  flags.DEFINE_boolean(
      name='eval_only', default=False,
      help=flags_core.help_wrap('Skip training and only perform evaluation on '
                                'the latest checkpoint.'))
  flags.DEFINE_boolean(
      name='image_bytes_as_serving_input', default=False,
      help=flags_core.help_wrap(
          'If True, exports savedmodel with a serving signature that accepts '
          'JPEG image bytes instead of a fixed size [HxWxC] tensor that '
          'represents the image. The former is easier to use for serving at '
          'the expense of image resize/cropping being done as part of model '
          'inference. Note, this flag only applies to ImageNet and cannot '
          'be used for CIFAR.'))
  flags.DEFINE_boolean(
      name='use_train_and_evaluate', default=False,
      help=flags_core.help_wrap(
          'If True, uses `tf.estimator.train_and_evaluate` for the training '
          'and evaluation loop, instead of separate calls to '
          '`classifier.train` and `classifier.evaluate`, which is the default '
          'behavior.'))
  flags.DEFINE_bool(
      name='enable_lars', default=False,
      help=flags_core.help_wrap(
          'Enable LARS optimizer for large batch training.'))
  flags.DEFINE_float(
      name='label_smoothing', default=0.0,
      help=flags_core.help_wrap(
          'Label smoothing parameter used in the softmax_cross_entropy.'))
  flags.DEFINE_float(
      name='weight_decay', default=1e-4,
      help=flags_core.help_wrap(
          'Weight decay coefficient for l2 regularization.'))

  choice_kwargs = dict(
      name='resnet_size', short_name='rs', default='50',
      help=flags_core.help_wrap('The size of the ResNet model to use.'))

  if resnet_size_choices is None:
    flags.DEFINE_string(**choice_kwargs)
  else:
    flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs)
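

# A hypothetical driver script (an imagenet_main-style entry point; the names
# `my_model_fn` and `my_input_fn` are illustrative) would wire these helpers
# together roughly as:
#
#   define_resnet_flags()
#   resnet_main(flags.FLAGS, my_model_fn, my_input_fn,
#               dataset_name='ImageNet', shape=[224, 224, 3])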