# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains utility and supporting functions for ResNet.

This module contains ResNet code which does not directly build layers. This
includes dataset management, hyperparameter and optimizer code, and argument
parsing. Code for defining the ResNet layers can be found in resnet_model.py.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
import math
import multiprocessing
import os

# pylint: disable=g-bad-import-order
from absl import flags
import tensorflow as tf
from tensorflow.contrib.data.python.ops import threadpool

from official.resnet import resnet_model
from official.utils.flags import core as flags_core
from official.utils.export import export
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.resnet import imagenet_preprocessing
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers


################################################################################
# Functions for input processing.
################################################################################
def process_record_dataset(dataset,
                           is_training,
                           batch_size,
                           shuffle_buffer,
                           parse_record_fn,
                           num_epochs=1,
                           dtype=tf.float32,
                           datasets_num_private_threads=None,
                           num_parallel_batches=1):
  """Given a Dataset with raw records, return an iterator over the records.

  Args:
    dataset: A Dataset representing raw records
    is_training: A boolean denoting whether the input is for training.
    batch_size: The number of samples per batch.
    shuffle_buffer: The buffer size to use when shuffling records. A larger
      value results in better randomness, but smaller values reduce startup
      time and use less memory.
    parse_record_fn: A function that takes a raw record and returns the
      corresponding (image, label) pair.
    num_epochs: The number of epochs to repeat the dataset.
    dtype: Data type to use for images/features.
    datasets_num_private_threads: Number of threads for a private
      threadpool created for all datasets computation.
    num_parallel_batches: Number of parallel batches for tf.data.

  Returns:
    Dataset of (image, label) pairs ready for iteration.
  """
  # Prefetches a batch at a time to smooth out the time taken to load input
  # files for shuffling and processing.
  dataset = dataset.prefetch(buffer_size=batch_size)
  if is_training:
    # Shuffles records before repeating to respect epoch boundaries.
    dataset = dataset.shuffle(buffer_size=shuffle_buffer)

  # Repeats the dataset for the number of epochs to train.
  dataset = dataset.repeat(num_epochs)

  # Parses the raw records into images and labels.
  dataset = dataset.apply(
      tf.data.experimental.map_and_batch(
          lambda value: parse_record_fn(value, is_training, dtype),
          batch_size=batch_size,
          num_parallel_batches=num_parallel_batches,
          drop_remainder=False))

  # Operations between the final prefetch and the get_next call to the iterator
  # will happen synchronously during run time. We prefetch here again to
  # background all of the above processing work and keep it out of the
  # critical training path. Setting buffer_size to tf.data.experimental.AUTOTUNE
  # allows DistributionStrategies to adjust how many batches to fetch based
  # on how many devices are present.
  dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

  # Defines a specific size thread pool for tf.data operations.
  if datasets_num_private_threads:
    tf.compat.v1.logging.info('datasets_num_private_threads: %s',
                              datasets_num_private_threads)
    dataset = threadpool.override_threadpool(
        dataset,
        threadpool.PrivateThreadPool(
            datasets_num_private_threads,
            display_name='input_pipeline_thread_pool'))

  return dataset
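
# Illustrative usage of process_record_dataset (a sketch, not part of the
# original module; 'train.tfrecords' and parse_record are assumed stand-ins
# for a real file pattern and a parser like the one in imagenet_main.py):
#
#   raw = tf.data.TFRecordDataset('train.tfrecords')
#   ds = process_record_dataset(raw, is_training=True, batch_size=32,
#                               shuffle_buffer=1500,
#                               parse_record_fn=parse_record, num_epochs=10)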


def get_synth_input_fn(height, width, num_channels, num_classes,
                       dtype=tf.float32):
  """Returns an input function that returns a dataset with random data.

  This input_fn returns a dataset that iterates over a set of random data and
  bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
  copy is still included. This is used to find the upper throughput bound when
  tuning the full input pipeline.

  Args:
    height: Integer height that will be used to create a fake image tensor.
    width: Integer width that will be used to create a fake image tensor.
    num_channels: Integer depth that will be used to create a fake image tensor.
    num_classes: Number of classes that should be represented in the fake labels
      tensor.
    dtype: Data type for features/images.

  Returns:
    An input_fn that can be used in place of a real one to return a dataset
    that can be used for iteration.
  """
  # pylint: disable=unused-argument
  def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
    """Returns dataset filled with random data."""
    # Synthetic input should be within [0, 255].
    inputs = tf.random.truncated_normal(
        [batch_size] + [height, width, num_channels],
        dtype=dtype,
        mean=127,
        stddev=60,
        name='synthetic_inputs')

    labels = tf.random.uniform(
        [batch_size],
        minval=0,
        maxval=num_classes - 1,
        dtype=tf.int32,
        name='synthetic_labels')
    data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
    data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return data

  return input_fn
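
# Illustrative usage (a sketch; the sizes and class count are assumed
# ImageNet-style values): the returned input_fn plugs in wherever the real
# dataset input_fn would go.
#
#   synth_fn = get_synth_input_fn(224, 224, 3, 1001, dtype=tf.float32)
#   dataset = synth_fn(is_training=True, data_dir=None, batch_size=32)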


def image_bytes_serving_input_fn(image_shape, dtype=tf.float32):
  """Serving input fn for raw jpeg images."""

  def _preprocess_image(image_bytes):
    """Preprocess a single raw image."""
    # Bounding box around the whole image.
    bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=dtype, shape=[1, 1, 4])
    height, width, num_channels = image_shape
    image = imagenet_preprocessing.preprocess_image(
        image_bytes, bbox, height, width, num_channels, is_training=False)
    return image

  image_bytes_list = tf.compat.v1.placeholder(
      shape=[None], dtype=tf.string, name='input_tensor')
  images = tf.map_fn(
      _preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
  return tf.estimator.export.TensorServingInputReceiver(
      images, {'image_bytes': image_bytes_list})
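
# Illustrative export usage (a sketch; resnet_main below does exactly this
# when --image_bytes_as_serving_input is set):
#
#   input_receiver_fn = functools.partial(
#       image_bytes_serving_input_fn, [224, 224, 3], dtype=tf.float32)
#   classifier.export_savedmodel(export_dir, input_receiver_fn,
#                                strip_default_attrs=True)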


def override_flags_and_set_envars_for_gpu_thread_pool(flags_obj):
  """Override flags and set env_vars for performance.

  These settings exist to test the difference between using stock settings
  and manual tuning. They also show some of the ENV_VARS that can be tweaked
  to squeeze a few extra examples per second. These settings are defaulted to
  the current platform of interest, which changes over time.

  On systems with small numbers of cpu cores, e.g. under 8 logical cores,
  setting up a gpu thread pool with `tf_gpu_thread_mode=gpu_private` may perform
  poorly.

  Args:
    flags_obj: Current flags, which will be adjusted, possibly overriding
      what has been set by the user on the command-line.
  """
  cpu_count = multiprocessing.cpu_count()
  tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count)

  # Sets up thread pool for each GPU for op scheduling.
  per_gpu_thread_count = 1
  total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus
  os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode
  os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
  tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s',
                            os.environ['TF_GPU_THREAD_COUNT'])
  tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s',
                            os.environ['TF_GPU_THREAD_MODE'])

  # Reduces general thread pool by number of threads used for GPU pool.
  main_thread_count = cpu_count - total_gpu_thread_count
  flags_obj.inter_op_parallelism_threads = main_thread_count

  # Sets thread count for tf.data. Logical cores minus threads assigned to the
  # private GPU pool, minus 2 threads per GPU for event monitoring and
  # sending / receiving tensors.
  num_monitoring_threads = 2 * flags_obj.num_gpus
  flags_obj.datasets_num_private_threads = (cpu_count - total_gpu_thread_count
                                            - num_monitoring_threads)
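
  # Illustrative arithmetic (an assumed example, not computed here): with 16
  # logical cores, 2 GPUs, and per_gpu_thread_count=1, the GPU pools take
  # 2 threads, inter_op_parallelism_threads becomes 16 - 2 = 14, and tf.data
  # gets 16 - 2 - (2 * 2) = 10 private threads.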


################################################################################
# Functions for running training/eval/validation loops for the model.
################################################################################
def learning_rate_with_decay(
    batch_size, batch_denom, num_images, boundary_epochs, decay_rates,
    base_lr=0.1, warmup=False):
  """Get a learning rate that decays step-wise as training progresses.

  Args:
    batch_size: the number of examples processed in each training batch.
    batch_denom: this value will be used to scale the base learning rate.
      `0.1 * batch size` is divided by this number, such that when
      batch_denom == batch_size, the initial learning rate will be 0.1.
    num_images: total number of images that will be used for training.
    boundary_epochs: list of ints representing the epochs at which we
      decay the learning rate.
    decay_rates: list of floats representing the decay rates to be used
      for scaling the learning rate. It should have one more element
      than `boundary_epochs`, and all elements should have the same type.
    base_lr: Initial learning rate scaled based on batch_denom.
    warmup: Run a 5 epoch warmup to the initial lr.
  Returns:
    A function that takes a single argument - the number of batches trained
    so far (global_step) - and returns the learning rate to be used for
    training the next batch.
  """
  initial_learning_rate = base_lr * batch_size / batch_denom
  batches_per_epoch = num_images / batch_size

  # Reduce the learning rate at certain epochs.
  # CIFAR-10: divide by 10 at epoch 100, 150, and 200
  # ImageNet: divide by 10 at epoch 30, 60, 80, and 90
  boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
  vals = [initial_learning_rate * decay for decay in decay_rates]

  def learning_rate_fn(global_step):
    """Builds scaled learning rate function with 5 epoch warm up."""
    lr = tf.compat.v1.train.piecewise_constant(global_step, boundaries, vals)
    if warmup:
      warmup_steps = int(batches_per_epoch * 5)
      warmup_lr = (
          initial_learning_rate * tf.cast(global_step, tf.float32) / tf.cast(
              warmup_steps, tf.float32))
      return tf.cond(pred=global_step < warmup_steps,
                     true_fn=lambda: warmup_lr,
                     false_fn=lambda: lr)
    return lr

  return learning_rate_fn
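
# Illustrative numbers (an assumed ImageNet-style configuration, not values
# set by this module): with batch_size=256, batch_denom=256,
# num_images=1281167, boundary_epochs=[30, 60, 80, 90] and
# decay_rates=[1, 0.1, 0.01, 1e-3, 1e-4], the initial learning rate is
# 0.1 * 256 / 256 = 0.1 and drops by 10x at roughly steps 150k, 300k, 400k,
# and 450k (~5005 batches per epoch).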


def resnet_model_fn(features, labels, mode, model_class,
                    resnet_size, weight_decay, learning_rate_fn, momentum,
                    data_format, resnet_version, loss_scale,
                    loss_filter_fn=None, dtype=resnet_model.DEFAULT_DTYPE,
                    fine_tune=False):
  """Shared functionality for different resnet model_fns.

  Initializes the ResnetModel representing the model layers
  and uses that model to build the necessary EstimatorSpecs for
  the `mode` in question. For training, this means building losses,
  the optimizer, and the train op that get passed into the EstimatorSpec.
  For evaluation and prediction, the EstimatorSpec is returned without
  a train op, but with the necessary parameters for the given mode.

  Args:
    features: tensor representing input images
    labels: tensor representing class labels for all input images
    mode: current estimator mode; should be one of
      `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
    model_class: a class representing a TensorFlow model that has a __call__
      function. We assume here that this is a subclass of ResnetModel.
    resnet_size: A single integer for the size of the ResNet model.
    weight_decay: weight decay loss rate used to regularize learned variables.
    learning_rate_fn: function that returns the current learning rate given
      the current global_step
    momentum: momentum term used for optimization
    data_format: Input format ('channels_last', 'channels_first', or None).
      If set to None, the format is dependent on whether a GPU is available.
    resnet_version: Integer representing which version of the ResNet network to
      use. See README for details. Valid values: [1, 2]
    loss_scale: The factor to scale the loss for numerical stability. A detailed
      summary is present in the arg parser help text.
    loss_filter_fn: function that takes a string variable name and returns
      True if the var should be included in loss calculation, and False
      otherwise. If None, batch_normalization variables will be excluded
      from the loss.
    dtype: the TensorFlow dtype to use for calculations.
    fine_tune: If True, only train the dense layers (final layers).

  Returns:
    EstimatorSpec parameterized according to the input params and the
    current mode.
  """

  # Generate a summary node for the images
  tf.compat.v1.summary.image('images', features, max_outputs=6)

  # Checks that features/images have the same data type being used for
  # calculations.
  assert features.dtype == dtype

  model = model_class(resnet_size, data_format, resnet_version=resnet_version,
                      dtype=dtype)

  logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)

  # This acts as a no-op if the logits are already in fp32 (provided logits are
  # not a SparseTensor). If dtype is low precision, logits must be cast to
  # fp32 for numerical stability.
  logits = tf.cast(logits, tf.float32)

  predictions = {
      'classes': tf.argmax(input=logits, axis=1),
      'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
  }

  if mode == tf.estimator.ModeKeys.PREDICT:
    # Return the predictions and the specification for serving a SavedModel
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        export_outputs={
            'predict': tf.estimator.export.PredictOutput(predictions)
        })

  # Calculate loss, which includes softmax cross entropy and L2 regularization.
  cross_entropy = tf.compat.v1.losses.sparse_softmax_cross_entropy(
      logits=logits, labels=labels)

  # Create a tensor named cross_entropy for logging purposes.
  tf.identity(cross_entropy, name='cross_entropy')
  tf.compat.v1.summary.scalar('cross_entropy', cross_entropy)

  # If no loss_filter_fn is passed, assume we want the default behavior,
  # which is that batch_normalization variables are excluded from loss.
  def exclude_batch_norm(name):
    return 'batch_normalization' not in name
  loss_filter_fn = loss_filter_fn or exclude_batch_norm

  # Add weight decay to the loss.
  l2_loss = weight_decay * tf.add_n(
      # loss is computed using fp32 for numerical stability.
      [tf.nn.l2_loss(tf.cast(v, tf.float32))
       for v in tf.compat.v1.trainable_variables() if loss_filter_fn(v.name)])
  tf.compat.v1.summary.scalar('l2_loss', l2_loss)
  loss = cross_entropy + l2_loss

  if mode == tf.estimator.ModeKeys.TRAIN:
    global_step = tf.compat.v1.train.get_or_create_global_step()

    learning_rate = learning_rate_fn(global_step)

    # Create a tensor named learning_rate for logging purposes
    tf.identity(learning_rate, name='learning_rate')
    tf.compat.v1.summary.scalar('learning_rate', learning_rate)

    optimizer = tf.compat.v1.train.MomentumOptimizer(
        learning_rate=learning_rate,
        momentum=momentum
    )

    def _dense_grad_filter(gvs):
      """Only apply gradient updates to the final layer.

      This function is used for fine tuning.

      Args:
        gvs: list of tuples with gradients and variable info
      Returns:
        filtered gradients so that only the dense layer remains
      """
      return [(g, v) for g, v in gvs if 'dense' in v.name]

    if loss_scale != 1:
      # When computing fp16 gradients, often intermediate tensor values are
      # so small, they underflow to 0. To avoid this, we multiply the loss by
      # loss_scale to make these tensor values loss_scale times bigger.
      scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)

      if fine_tune:
        scaled_grad_vars = _dense_grad_filter(scaled_grad_vars)

      # Once the gradient computation is complete we can scale the gradients
      # back to the correct scale before passing them to the optimizer.
      unscaled_grad_vars = [(grad / loss_scale, var)
                            for grad, var in scaled_grad_vars]
      minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step)
    else:
      grad_vars = optimizer.compute_gradients(loss)
      if fine_tune:
        grad_vars = _dense_grad_filter(grad_vars)
      minimize_op = optimizer.apply_gradients(grad_vars, global_step)

    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    train_op = tf.group(minimize_op, update_ops)
  else:
    train_op = None

  accuracy = tf.compat.v1.metrics.accuracy(labels, predictions['classes'])
  accuracy_top_5 = tf.compat.v1.metrics.mean(
      tf.nn.in_top_k(predictions=logits, targets=labels, k=5, name='top_5_op'))
  metrics = {'accuracy': accuracy,
             'accuracy_top_5': accuracy_top_5}

  # Create a tensor named train_accuracy for logging purposes
  tf.identity(accuracy[1], name='train_accuracy')
  tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')
  tf.compat.v1.summary.scalar('train_accuracy', accuracy[1])
  tf.compat.v1.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])

  return tf.estimator.EstimatorSpec(
      mode=mode,
      predictions=predictions,
      loss=loss,
      train_op=train_op,
      eval_metric_ops=metrics)


def resnet_main(
    flags_obj, model_function, input_function, dataset_name, shape=None):
  """Shared main loop for ResNet Models.

  Args:
    flags_obj: An object containing parsed flags. See define_resnet_flags()
      for details.
    model_function: the function that instantiates the Model and builds the
      ops for train/eval. This will be passed directly into the estimator.
    input_function: the function that processes the dataset and returns a
      dataset that the estimator can train on. This will be wrapped with
      all the relevant flags for running and passed to estimator.
    dataset_name: the name of the dataset for training and evaluation. This is
      used for logging purposes.
    shape: list of ints representing the shape of the images used for training.
      This is only used if flags_obj.export_dir is passed.

  Returns:
    Dict of results of the run.
  """

  model_helpers.apply_clean(flags.FLAGS)

  # Ensures flag override logic is only executed if explicitly triggered.
  if flags_obj.tf_gpu_thread_mode:
    override_flags_and_set_envars_for_gpu_thread_pool(flags_obj)

  # Creates session config. allow_soft_placement = True is required for
  # multi-GPU and is not harmful for other modes.
  session_config = tf.compat.v1.ConfigProto(
      inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
      intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
      allow_soft_placement=True)

  distribution_strategy = distribution_utils.get_distribution_strategy(
      flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)

  # Creates a `RunConfig` that checkpoints every 24 hours which essentially
  # results in checkpoints determined only by `epochs_between_evals`.
  run_config = tf.estimator.RunConfig(
      train_distribute=distribution_strategy,
      session_config=session_config,
      save_checkpoints_secs=60*60*24)

  # Initializes model with all but the dense layer from pretrained ResNet.
  if flags_obj.pretrained_model_checkpoint_path is not None:
    warm_start_settings = tf.estimator.WarmStartSettings(
        flags_obj.pretrained_model_checkpoint_path,
        vars_to_warm_start='^(?!.*dense)')
  else:
    warm_start_settings = None

  classifier = tf.estimator.Estimator(
      model_fn=model_function, model_dir=flags_obj.model_dir, config=run_config,
      warm_start_from=warm_start_settings, params={
          'resnet_size': int(flags_obj.resnet_size),
          'data_format': flags_obj.data_format,
          'batch_size': flags_obj.batch_size,
          'resnet_version': int(flags_obj.resnet_version),
          'loss_scale': flags_core.get_loss_scale(flags_obj),
          'dtype': flags_core.get_tf_dtype(flags_obj),
          'fine_tune': flags_obj.fine_tune
      })

  run_params = {
      'batch_size': flags_obj.batch_size,
      'dtype': flags_core.get_tf_dtype(flags_obj),
      'resnet_size': flags_obj.resnet_size,
      'resnet_version': flags_obj.resnet_version,
      'synthetic_data': flags_obj.use_synthetic_data,
      'train_epochs': flags_obj.train_epochs,
  }
  if flags_obj.use_synthetic_data:
    dataset_name = dataset_name + '-synthetic'

  benchmark_logger = logger.get_benchmark_logger()
  benchmark_logger.log_run_info('resnet', dataset_name, run_params,
                                test_id=flags_obj.benchmark_test_id)

  train_hooks = hooks_helper.get_train_hooks(
      flags_obj.hooks,
      model_dir=flags_obj.model_dir,
      batch_size=flags_obj.batch_size)

  def input_fn_train(num_epochs):
    return input_function(
        is_training=True,
        data_dir=flags_obj.data_dir,
        batch_size=distribution_utils.per_device_batch_size(
            flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
        num_epochs=num_epochs,
        dtype=flags_core.get_tf_dtype(flags_obj),
        datasets_num_private_threads=flags_obj.datasets_num_private_threads,
        num_parallel_batches=flags_obj.datasets_num_parallel_batches)

  def input_fn_eval():
    return input_function(
        is_training=False,
        data_dir=flags_obj.data_dir,
        batch_size=distribution_utils.per_device_batch_size(
            flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
        num_epochs=1,
        dtype=flags_core.get_tf_dtype(flags_obj))

  if flags_obj.eval_only or not flags_obj.train_epochs:
    # If --eval_only is set, perform a single loop with zero train epochs.
    schedule, n_loops = [0], 1
  else:
    # Compute the number of times to loop while training. All but the last
    # pass will train for `epochs_between_evals` epochs, while the last will
    # train for the number needed to reach `train_epochs`. For instance if
    #   train_epochs = 25 and epochs_between_evals = 10
    # schedule will be set to [10, 10, 5]. That is to say, the loop will:
    #   Train for 10 epochs and then evaluate.
    #   Train for another 10 epochs and then evaluate.
    #   Train for a final 5 epochs (to reach 25 epochs) and then evaluate.
    n_loops = math.ceil(flags_obj.train_epochs / flags_obj.epochs_between_evals)
    schedule = [flags_obj.epochs_between_evals for _ in range(int(n_loops))]
    schedule[-1] = flags_obj.train_epochs - sum(schedule[:-1])  # corrects over-counting.

  for cycle_index, num_train_epochs in enumerate(schedule):
    tf.compat.v1.logging.info('Starting cycle: %d/%d', cycle_index,
                              int(n_loops))

    if num_train_epochs:
      classifier.train(input_fn=lambda: input_fn_train(num_train_epochs),
                       hooks=train_hooks, max_steps=flags_obj.max_train_steps)

    tf.compat.v1.logging.info('Starting to evaluate.')

    # flags_obj.max_train_steps is generally associated with testing and
    # profiling. As a result it is frequently called with synthetic data, which
    # will iterate forever. Passing steps=flags_obj.max_train_steps allows the
    # eval (which is generally unimportant in those circumstances) to terminate.
    # Note that eval will run for max_train_steps each loop, regardless of the
    # global_step count.
    eval_results = classifier.evaluate(input_fn=input_fn_eval,
                                       steps=flags_obj.max_train_steps)

    benchmark_logger.log_evaluation_result(eval_results)

    if model_helpers.past_stop_threshold(
        flags_obj.stop_threshold, eval_results['accuracy']):
      break

  if flags_obj.export_dir is not None:
    # Exports a saved model for the given classifier.
    export_dtype = flags_core.get_tf_dtype(flags_obj)
    if flags_obj.image_bytes_as_serving_input:
      input_receiver_fn = functools.partial(
          image_bytes_serving_input_fn, shape, dtype=export_dtype)
    else:
      input_receiver_fn = export.build_tensor_serving_input_receiver_fn(
          shape, batch_size=flags_obj.batch_size, dtype=export_dtype)
    classifier.export_savedmodel(flags_obj.export_dir, input_receiver_fn,
                                 strip_default_attrs=True)
  return eval_results
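
# Illustrative driver (a sketch; the concrete entry points in this repo,
# e.g. imagenet_main.py, define their own model_fn/input_fn and shape):
#
#   def main(_):
#     with logger.benchmark_context(flags.FLAGS):
#       resnet_main(flags.FLAGS, model_fn, input_fn, dataset_name='ImageNet',
#                   shape=[224, 224, 3])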

def define_resnet_flags(resnet_size_choices=None):
  """Add flags and validators for ResNet."""
  flags_core.define_base()
  flags_core.define_performance(num_parallel_calls=False,
                                tf_gpu_thread_mode=True,
                                datasets_num_private_threads=True,
                                datasets_num_parallel_batches=True)
  flags_core.define_image()
  flags_core.define_benchmark()
  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_enum(
      name='resnet_version', short_name='rv', default='1',
      enum_values=['1', '2'],
      help=flags_core.help_wrap(
          'Version of ResNet. (1 or 2) See README.md for details.'))
  flags.DEFINE_bool(
      name='fine_tune', short_name='ft', default=False,
      help=flags_core.help_wrap(
          'If True do not train any parameters except for the final layer.'))
  flags.DEFINE_string(
      name='pretrained_model_checkpoint_path', short_name='pmcp', default=None,
      help=flags_core.help_wrap(
          'If not None, initialize all of the network except the final layer '
          'with these values.'))
  flags.DEFINE_boolean(
      name='eval_only', default=False,
      help=flags_core.help_wrap('Skip training and only perform evaluation on '
                                'the latest checkpoint.'))
  flags.DEFINE_boolean(
      name='image_bytes_as_serving_input', default=False,
      help=flags_core.help_wrap(
          'If True exports savedmodel with serving signature that accepts '
          'JPEG image bytes instead of a fixed size [HxWxC] tensor that '
          'represents the image. The former is easier to use for serving at '
          'the expense of image resize/cropping being done as part of model '
          'inference. Note, this flag only applies to ImageNet and cannot '
          'be used for CIFAR.'))
  flags.DEFINE_boolean(
      name='turn_off_distribution_strategy', default=False,
      help=flags_core.help_wrap('Set to True to not use distribution '
                                'strategies.'))
  choice_kwargs = dict(
      name='resnet_size', short_name='rs', default='50',
      help=flags_core.help_wrap('The size of the ResNet model to use.'))

  if resnet_size_choices is None:
    flags.DEFINE_string(**choice_kwargs)
  else:
    flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs)
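
# Illustrative flag usage (an assumed invocation; the entry-point script
# names are defined outside this module):
#
#   python imagenet_main.py --resnet_size=50 --resnet_version=2 \
#       --fine_tune --pretrained_model_checkpoint_path=/path/to/checkpoint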