# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains utility and supporting functions for ResNet.

  This module contains ResNet code which does not directly build layers. This
includes dataset management, hyperparameter and optimizer code, and argument
parsing. Code for defining the ResNet layers can be found in resnet_model.py.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import functools
import math
import multiprocessing
import os

# pylint: disable=g-bad-import-order
from absl import flags
import tensorflow as tf

from official.resnet import resnet_model
from official.utils.flags import core as flags_core
from official.utils.export import export
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.resnet import imagenet_preprocessing
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers


################################################################################
# Functions for input processing.
################################################################################
def process_record_dataset(dataset,
                           is_training,
                           batch_size,
                           shuffle_buffer,
                           parse_record_fn,
                           num_epochs=1,
                           dtype=tf.float32,
                           datasets_num_private_threads=None,
                           num_parallel_batches=1):
  """Given a Dataset with raw records, return a dataset of processed records.

  Args:
    dataset: A Dataset representing raw records
    is_training: A boolean denoting whether the input is for training.
    batch_size: The number of samples per batch.
    shuffle_buffer: The buffer size to use when shuffling records. A larger
      value results in better randomness, but smaller values reduce startup
      time and use less memory.
    parse_record_fn: A function that takes a raw record and returns the
      corresponding (image, label) pair.
    num_epochs: The number of epochs to repeat the dataset.
    dtype: Data type to use for images/features.
    datasets_num_private_threads: Number of threads for a private
      threadpool created for all datasets computation.
    num_parallel_batches: Number of parallel batches for tf.data.

  Returns:
    Dataset of (image, label) pairs ready for iteration.
  """
  # Defines a specific size thread pool for tf.data operations.
  if datasets_num_private_threads:
    options = tf.data.Options()
    options.experimental_threading = tf.data.experimental.ThreadingOptions()
    options.experimental_threading.private_threadpool_size = (
        datasets_num_private_threads)
    dataset = dataset.with_options(options)
    tf.compat.v1.logging.info('datasets_num_private_threads: %s',
                              datasets_num_private_threads)

  # Prefetches a batch at a time to smooth out the time taken to load input
  # files for shuffling and processing.
  dataset = dataset.prefetch(buffer_size=batch_size)
  if is_training:
    # Shuffles records before repeating to respect epoch boundaries.
    dataset = dataset.shuffle(buffer_size=shuffle_buffer)

  # Repeats the dataset for the number of epochs to train.
  dataset = dataset.repeat(num_epochs)

  # Parses the raw records into images and labels.
  dataset = dataset.apply(
      tf.data.experimental.map_and_batch(
          lambda value: parse_record_fn(value, is_training, dtype),
          batch_size=batch_size,
          num_parallel_batches=num_parallel_batches,
          drop_remainder=False))

  # Operations between the final prefetch and the get_next call to the iterator
  # will happen synchronously during run time. We prefetch here again to
  # background all of the above processing work and keep it out of the
  # critical training path. Setting buffer_size to tf.data.experimental.AUTOTUNE
  # allows DistributionStrategies to adjust how many batches to fetch based
  # on how many devices are present.
  dataset = dataset.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

  return dataset
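
# Example (illustrative sketch, not part of the training pipeline): given a
# TFRecord file and a parse_record_fn with the signature described above, the
# helper could be wired up roughly as follows. The filename and sizes are
# hypothetical.
#
#   raw_dataset = tf.data.TFRecordDataset(['train-00000-of-01024'])
#   train_dataset = process_record_dataset(
#       dataset=raw_dataset,
#       is_training=True,
#       batch_size=128,
#       shuffle_buffer=10000,
#       parse_record_fn=parse_record_fn,
#       num_epochs=1,
#       dtype=tf.float32)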


def get_synth_input_fn(height, width, num_channels, num_classes,
                       dtype=tf.float32):
  """Returns an input function that returns a dataset with random data.

  This input_fn returns a data set that iterates over a set of random data and
  bypasses all preprocessing, e.g. jpeg decode and copy. The host to device
  copy is still included. This is used to find the upper throughput bound when
  tuning the full input pipeline.

  Args:
    height: Integer height that will be used to create a fake image tensor.
    width: Integer width that will be used to create a fake image tensor.
    num_channels: Integer depth that will be used to create a fake image tensor.
    num_classes: Number of classes that should be represented in the fake labels
      tensor
    dtype: Data type for features/images.

  Returns:
    An input_fn that can be used in place of a real one to return a dataset
    that can be used for iteration.
  """
  # pylint: disable=unused-argument
  def input_fn(is_training, data_dir, batch_size, *args, **kwargs):
    """Returns dataset filled with random data."""
    # Synthetic input should be within [0, 255].
    inputs = tf.random.truncated_normal(
        [batch_size] + [height, width, num_channels],
        dtype=dtype,
        mean=127,
        stddev=60,
        name='synthetic_inputs')

    labels = tf.random.uniform(
        [batch_size],
        minval=0,
        maxval=num_classes - 1,
        dtype=tf.int32,
        name='synthetic_labels')
    data = tf.data.Dataset.from_tensors((inputs, labels)).repeat()
    data = data.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
    return data

  return input_fn
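
# Example (illustrative sketch): the returned input_fn has the same signature
# as the real dataset input functions, so it can be dropped in when
# benchmarking the model without touching the input pipeline. The
# ImageNet-like dimensions below are hypothetical.
#
#   synth_input_fn = get_synth_input_fn(224, 224, 3, 1001, dtype=tf.float32)
#   dataset = synth_input_fn(is_training=True, data_dir=None, batch_size=32)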


def image_bytes_serving_input_fn(image_shape, dtype=tf.float32):
  """Serving input fn for raw jpeg images."""

  def _preprocess_image(image_bytes):
    """Preprocess a single raw image."""
    # Bounding box around the whole image.
    bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=dtype, shape=[1, 1, 4])
    height, width, num_channels = image_shape
    image = imagenet_preprocessing.preprocess_image(
        image_bytes, bbox, height, width, num_channels, is_training=False)
    return image

  image_bytes_list = tf.compat.v1.placeholder(
      shape=[None], dtype=tf.string, name='input_tensor')
  images = tf.map_fn(
      _preprocess_image, image_bytes_list, back_prop=False, dtype=dtype)
  return tf.estimator.export.TensorServingInputReceiver(
      images, {'image_bytes': image_bytes_list})
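
# Example (illustrative sketch): to export a SavedModel that accepts raw JPEG
# bytes (see the --image_bytes_as_serving_input flag below), this function can
# be bound with functools.partial, as resnet_main does. The shape, dtype, and
# export path are hypothetical.
#
#   serving_input_fn = functools.partial(
#       image_bytes_serving_input_fn, [224, 224, 3], dtype=tf.float32)
#   classifier.export_savedmodel('/tmp/resnet_export', serving_input_fn,
#                                strip_default_attrs=True)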


def override_flags_and_set_envars_for_gpu_thread_pool(flags_obj):
  """Override flags and set env_vars for performance.

  These settings exist to test the difference between using stock settings
  and manual tuning. It also shows some of the ENV_VARS that can be tweaked to
  squeeze a few extra examples per second.  These settings are defaulted to the
  current platform of interest, which changes over time.

  On systems with small numbers of CPU cores, e.g. under 8 logical cores,
  setting up a GPU thread pool with `tf_gpu_thread_mode=gpu_private` may
  perform poorly.

  Args:
    flags_obj: Current flags, which will be adjusted, possibly overriding
      what has been set by the user on the command line.
  """
  cpu_count = multiprocessing.cpu_count()
  tf.compat.v1.logging.info('Logical CPU cores: %s', cpu_count)

  # Sets up thread pool for each GPU for op scheduling.
  per_gpu_thread_count = 1
  total_gpu_thread_count = per_gpu_thread_count * flags_obj.num_gpus
  os.environ['TF_GPU_THREAD_MODE'] = flags_obj.tf_gpu_thread_mode
  os.environ['TF_GPU_THREAD_COUNT'] = str(per_gpu_thread_count)
  tf.compat.v1.logging.info('TF_GPU_THREAD_COUNT: %s',
                            os.environ['TF_GPU_THREAD_COUNT'])
  tf.compat.v1.logging.info('TF_GPU_THREAD_MODE: %s',
                            os.environ['TF_GPU_THREAD_MODE'])

  # Reduces general thread pool by number of threads used for GPU pool.
  main_thread_count = cpu_count - total_gpu_thread_count
  flags_obj.inter_op_parallelism_threads = main_thread_count

  # Sets thread count for tf.data: logical cores minus the threads assigned to
  # the private GPU pool, minus 2 threads per GPU for event monitoring and
  # sending / receiving tensors.
  num_monitoring_threads = 2 * flags_obj.num_gpus
  flags_obj.datasets_num_private_threads = (cpu_count - total_gpu_thread_count
                                            - num_monitoring_threads)
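
# Worked example (assumed numbers, for illustration only): with 16 logical CPU
# cores, 2 GPUs, and per_gpu_thread_count=1, total_gpu_thread_count is 2, so
# inter_op_parallelism_threads becomes 16 - 2 = 14; with 2 monitoring threads
# per GPU, datasets_num_private_threads becomes 16 - 2 - 4 = 10.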


################################################################################
# Functions for running training/eval/validation loops for the model.
################################################################################
def learning_rate_with_decay(
    batch_size, batch_denom, num_images, boundary_epochs, decay_rates,
    base_lr=0.1, warmup=False):
  """Get a learning rate that decays step-wise as training progresses.

  Args:
    batch_size: the number of examples processed in each training batch.
    batch_denom: this value will be used to scale the base learning rate.
      `0.1 * batch size` is divided by this number, such that when
      batch_denom == batch_size, the initial learning rate will be 0.1.
    num_images: total number of images that will be used for training.
    boundary_epochs: list of ints representing the epochs at which we
      decay the learning rate.
    decay_rates: list of floats representing the decay rates to be used
      for scaling the learning rate. It should have one more element
      than `boundary_epochs`, and all elements should have the same type.
    base_lr: Initial learning rate scaled based on batch_denom.
    warmup: Run a 5 epoch warmup to the initial lr.

  Returns:
    Returns a function that takes a single argument - the number of batches
    trained so far (global_step) - and returns the learning rate to be used
    for training the next batch.
  """
  initial_learning_rate = base_lr * batch_size / batch_denom
  batches_per_epoch = num_images / batch_size

  # Reduce the learning rate at certain epochs.
  # CIFAR-10: divide by 10 at epoch 100, 150, and 200
  # ImageNet: divide by 10 at epoch 30, 60, 80, and 90
  boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
  vals = [initial_learning_rate * decay for decay in decay_rates]

  def learning_rate_fn(global_step):
    """Builds scaled learning rate function with 5 epoch warm up."""
    lr = tf.compat.v1.train.piecewise_constant(global_step, boundaries, vals)
    if warmup:
      warmup_steps = int(batches_per_epoch * 5)
      warmup_lr = (
          initial_learning_rate * tf.cast(global_step, tf.float32) / tf.cast(
              warmup_steps, tf.float32))
      return tf.cond(pred=global_step < warmup_steps,
                     true_fn=lambda: warmup_lr,
                     false_fn=lambda: lr)
    return lr

  return learning_rate_fn
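
# Example (illustrative sketch, hypothetical ImageNet-style values): with
# batch_size=256 and batch_denom=256 the initial rate is 0.1 * 256 / 256 = 0.1,
# and boundary_epochs=[30, 60, 80, 90] with
# decay_rates=[1, 0.1, 0.01, 0.001, 1e-4] steps the rate down at each boundary.
#
#   lr_fn = learning_rate_with_decay(
#       batch_size=256, batch_denom=256, num_images=1281167,
#       boundary_epochs=[30, 60, 80, 90],
#       decay_rates=[1, 0.1, 0.01, 0.001, 1e-4])
#   learning_rate = lr_fn(tf.compat.v1.train.get_or_create_global_step())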


def resnet_model_fn(features, labels, mode, model_class,
                    resnet_size, weight_decay, learning_rate_fn, momentum,
                    data_format, resnet_version, loss_scale,
                    loss_filter_fn=None, dtype=resnet_model.DEFAULT_DTYPE,
                    fine_tune=False):
  """Shared functionality for different resnet model_fns.

  Initializes the ResnetModel representing the model layers
  and uses that model to build the necessary EstimatorSpecs for
  the `mode` in question. For training, this means building losses,
  the optimizer, and the train op that get passed into the EstimatorSpec.
  For evaluation and prediction, the EstimatorSpec is returned without
  a train op, but with the necessary parameters for the given mode.

  Args:
    features: tensor representing input images
    labels: tensor representing class labels for all input images
    mode: current estimator mode; should be one of
      `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
    model_class: a class representing a TensorFlow model that has a __call__
      function. We assume here that this is a subclass of ResnetModel.
    resnet_size: A single integer for the size of the ResNet model.
    weight_decay: weight decay loss rate used to regularize learned variables.
    learning_rate_fn: function that returns the current learning rate given
      the current global_step
    momentum: momentum term used for optimization
    data_format: Input format ('channels_last', 'channels_first', or None).
      If set to None, the format is dependent on whether a GPU is available.
    resnet_version: Integer representing which version of the ResNet network to
      use. See README for details. Valid values: [1, 2]
    loss_scale: The factor to scale the loss for numerical stability. A detailed
      summary is present in the arg parser help text.
    loss_filter_fn: function that takes a string variable name and returns
      True if the var should be included in loss calculation, and False
      otherwise. If None, batch_normalization variables will be excluded
      from the loss.
    dtype: the TensorFlow dtype to use for calculations.
    fine_tune: If True, only train the dense layers (final layers).

  Returns:
    EstimatorSpec parameterized according to the input params and the
    current mode.
  """

  # Generate a summary node for the images
  tf.compat.v1.summary.image('images', features, max_outputs=6)
  # Checks that features/images have same data type being used for calculations.
  assert features.dtype == dtype

  model = model_class(resnet_size, data_format, resnet_version=resnet_version,
                      dtype=dtype)

  logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)

  # This acts as a no-op if the logits are already in fp32 (provided logits are
  # not a SparseTensor). If dtype is low precision, logits must be cast to
  # fp32 for numerical stability.
  logits = tf.cast(logits, tf.float32)

  predictions = {
      'classes': tf.argmax(input=logits, axis=1),
      'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
  }

  if mode == tf.estimator.ModeKeys.PREDICT:
    # Return the predictions and the specification for serving a SavedModel
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        export_outputs={
            'predict': tf.estimator.export.PredictOutput(predictions)
        })

  # Calculate loss, which includes softmax cross entropy and L2 regularization.
  cross_entropy = tf.compat.v1.losses.sparse_softmax_cross_entropy(
      logits=logits, labels=labels)

  # Create a tensor named cross_entropy for logging purposes.
  tf.identity(cross_entropy, name='cross_entropy')
  tf.compat.v1.summary.scalar('cross_entropy', cross_entropy)

  # If no loss_filter_fn is passed, assume we want the default behavior,
  # which is that batch_normalization variables are excluded from loss.
  def exclude_batch_norm(name):
    return 'batch_normalization' not in name
  loss_filter_fn = loss_filter_fn or exclude_batch_norm

  # Add weight decay to the loss.
  l2_loss = weight_decay * tf.add_n(
      # loss is computed using fp32 for numerical stability.
      [tf.nn.l2_loss(tf.cast(v, tf.float32))
       for v in tf.compat.v1.trainable_variables() if loss_filter_fn(v.name)])
  tf.compat.v1.summary.scalar('l2_loss', l2_loss)
  loss = cross_entropy + l2_loss

  if mode == tf.estimator.ModeKeys.TRAIN:
    global_step = tf.compat.v1.train.get_or_create_global_step()

    learning_rate = learning_rate_fn(global_step)

    # Create a tensor named learning_rate for logging purposes
    tf.identity(learning_rate, name='learning_rate')
    tf.compat.v1.summary.scalar('learning_rate', learning_rate)

    optimizer = tf.compat.v1.train.MomentumOptimizer(
        learning_rate=learning_rate,
        momentum=momentum
    )

    def _dense_grad_filter(gvs):
      """Only apply gradient updates to the final layer.

      This function is used for fine tuning.

      Args:
        gvs: list of tuples with gradients and variable info
      Returns:
        filtered gradients so that only the dense layer remains
      """
      return [(g, v) for g, v in gvs if 'dense' in v.name]

    if loss_scale != 1:
      # When computing fp16 gradients, often intermediate tensor values are
      # so small, they underflow to 0. To avoid this, we multiply the loss by
      # loss_scale to make these tensor values loss_scale times bigger.
      scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)

      if fine_tune:
        scaled_grad_vars = _dense_grad_filter(scaled_grad_vars)

      # Once the gradient computation is complete we can scale the gradients
      # back to the correct scale before passing them to the optimizer.
      unscaled_grad_vars = [(grad / loss_scale, var)
                            for grad, var in scaled_grad_vars]
      minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step)
    else:
      grad_vars = optimizer.compute_gradients(loss)
      if fine_tune:
        grad_vars = _dense_grad_filter(grad_vars)
      minimize_op = optimizer.apply_gradients(grad_vars, global_step)

    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    train_op = tf.group(minimize_op, update_ops)
  else:
    train_op = None

  accuracy = tf.compat.v1.metrics.accuracy(labels, predictions['classes'])
  accuracy_top_5 = tf.compat.v1.metrics.mean(
      tf.nn.in_top_k(predictions=logits, targets=labels, k=5, name='top_5_op'))
  metrics = {'accuracy': accuracy,
             'accuracy_top_5': accuracy_top_5}

  # Create a tensor named train_accuracy for logging purposes
  tf.identity(accuracy[1], name='train_accuracy')
  tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')
  tf.compat.v1.summary.scalar('train_accuracy', accuracy[1])
  tf.compat.v1.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])

  return tf.estimator.EstimatorSpec(
      mode=mode,
      predictions=predictions,
      loss=loss,
      train_op=train_op,
      eval_metric_ops=metrics)
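
# Example (illustrative sketch): dataset-specific binaries are expected to wrap
# resnet_model_fn in their own model_fn, passing their Model subclass and
# learning rate schedule. MyResnetModel and the hyperparameters below are
# hypothetical.
#
#   def my_model_fn(features, labels, mode, params):
#     learning_rate_fn = learning_rate_with_decay(
#         batch_size=params['batch_size'], batch_denom=256,
#         num_images=1281167, boundary_epochs=[30, 60, 80, 90],
#         decay_rates=[1, 0.1, 0.01, 0.001, 1e-4])
#     return resnet_model_fn(
#         features, labels, mode, MyResnetModel,
#         resnet_size=params['resnet_size'], weight_decay=1e-4,
#         learning_rate_fn=learning_rate_fn, momentum=0.9,
#         data_format=params['data_format'],
#         resnet_version=params['resnet_version'],
#         loss_scale=params['loss_scale'], dtype=params['dtype'],
#         fine_tune=params['fine_tune'])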


def resnet_main(
    flags_obj, model_function, input_function, dataset_name, shape=None):
  """Shared main loop for ResNet Models.

  Args:
    flags_obj: An object containing parsed flags. See define_resnet_flags()
      for details.
    model_function: the function that instantiates the Model and builds the
      ops for train/eval. This will be passed directly into the estimator.
    input_function: the function that processes the dataset and returns a
      dataset that the estimator can train on. This will be wrapped with
      all the relevant flags for running and passed to estimator.
    dataset_name: the name of the dataset for training and evaluation. This is
      used for logging purposes.
    shape: list of ints representing the shape of the images used for training.
      This is only used if flags_obj.export_dir is passed.

  Returns:
    Dict of results of the run.
  """

  model_helpers.apply_clean(flags.FLAGS)

  # Ensures flag override logic is only executed if explicitly triggered.
  if flags_obj.tf_gpu_thread_mode:
    override_flags_and_set_envars_for_gpu_thread_pool(flags_obj)

  # Creates session config. allow_soft_placement = True is required for
  # multi-GPU and is not harmful for other modes.
  session_config = tf.compat.v1.ConfigProto(
      inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
      intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
      allow_soft_placement=True)

  distribution_strategy = distribution_utils.get_distribution_strategy(
      flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)

  # Creates a `RunConfig` that checkpoints every 24 hours, which essentially
  # results in checkpoints determined only by `epochs_between_evals`.
  run_config = tf.estimator.RunConfig(
      train_distribute=distribution_strategy,
      session_config=session_config,
      save_checkpoints_secs=60*60*24)

  # Initializes model with all but the dense layer from pretrained ResNet.
  if flags_obj.pretrained_model_checkpoint_path is not None:
    warm_start_settings = tf.estimator.WarmStartSettings(
        flags_obj.pretrained_model_checkpoint_path,
        vars_to_warm_start='^(?!.*dense)')
  else:
    warm_start_settings = None

  classifier = tf.estimator.Estimator(
      model_fn=model_function, model_dir=flags_obj.model_dir, config=run_config,
      warm_start_from=warm_start_settings, params={
          'resnet_size': int(flags_obj.resnet_size),
          'data_format': flags_obj.data_format,
          'batch_size': flags_obj.batch_size,
          'resnet_version': int(flags_obj.resnet_version),
          'loss_scale': flags_core.get_loss_scale(flags_obj),
          'dtype': flags_core.get_tf_dtype(flags_obj),
          'fine_tune': flags_obj.fine_tune
      })

  run_params = {
      'batch_size': flags_obj.batch_size,
      'dtype': flags_core.get_tf_dtype(flags_obj),
      'resnet_size': flags_obj.resnet_size,
      'resnet_version': flags_obj.resnet_version,
      'synthetic_data': flags_obj.use_synthetic_data,
      'train_epochs': flags_obj.train_epochs,
  }
  if flags_obj.use_synthetic_data:
    dataset_name = dataset_name + '-synthetic'

  benchmark_logger = logger.get_benchmark_logger()
  benchmark_logger.log_run_info('resnet', dataset_name, run_params,
                                test_id=flags_obj.benchmark_test_id)

  train_hooks = hooks_helper.get_train_hooks(
      flags_obj.hooks,
      model_dir=flags_obj.model_dir,
      batch_size=flags_obj.batch_size)

  def input_fn_train(num_epochs):
    return input_function(
        is_training=True,
        data_dir=flags_obj.data_dir,
        batch_size=distribution_utils.per_device_batch_size(
            flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
        num_epochs=num_epochs,
        dtype=flags_core.get_tf_dtype(flags_obj),
        datasets_num_private_threads=flags_obj.datasets_num_private_threads,
        num_parallel_batches=flags_obj.datasets_num_parallel_batches)

  def input_fn_eval():
    return input_function(
        is_training=False,
        data_dir=flags_obj.data_dir,
        batch_size=distribution_utils.per_device_batch_size(
            flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
        num_epochs=1,
        dtype=flags_core.get_tf_dtype(flags_obj))

  if flags_obj.eval_only or not flags_obj.train_epochs:
    # If --eval_only is set, perform a single loop with zero train epochs.
    schedule, n_loops = [0], 1
  else:
    # Compute the number of times to loop while training. All but the last
    # pass will train for `epochs_between_evals` epochs, while the last will
    # train for the number needed to reach `training_epochs`. For instance if
    #   train_epochs = 25 and epochs_between_evals = 10
    # schedule will be set to [10, 10, 5]. That is to say, the loop will:
    #   Train for 10 epochs and then evaluate.
    #   Train for another 10 epochs and then evaluate.
    #   Train for a final 5 epochs (to reach 25 epochs) and then evaluate.
    n_loops = math.ceil(flags_obj.train_epochs / flags_obj.epochs_between_evals)
    schedule = [flags_obj.epochs_between_evals for _ in range(int(n_loops))]
    schedule[-1] = flags_obj.train_epochs - sum(schedule[:-1])  # over counting.

  for cycle_index, num_train_epochs in enumerate(schedule):
    tf.compat.v1.logging.info('Starting cycle: %d/%d', cycle_index,
                              int(n_loops))

    if num_train_epochs:
      classifier.train(input_fn=lambda: input_fn_train(num_train_epochs),
                       hooks=train_hooks, max_steps=flags_obj.max_train_steps)

    tf.compat.v1.logging.info('Starting to evaluate.')

    # flags_obj.max_train_steps is generally associated with testing and
    # profiling. As a result it is frequently called with synthetic data, which
    # will iterate forever. Passing steps=flags_obj.max_train_steps allows the
    # eval (which is generally unimportant in those circumstances) to terminate.
    # Note that eval will run for max_train_steps each loop, regardless of the
    # global_step count.
    eval_results = classifier.evaluate(input_fn=input_fn_eval,
                                       steps=flags_obj.max_train_steps)

    benchmark_logger.log_evaluation_result(eval_results)

    if model_helpers.past_stop_threshold(
        flags_obj.stop_threshold, eval_results['accuracy']):
      break

  if flags_obj.export_dir is not None:
    # Exports a saved model for the given classifier.
    export_dtype = flags_core.get_tf_dtype(flags_obj)
    if flags_obj.image_bytes_as_serving_input:
      input_receiver_fn = functools.partial(
          image_bytes_serving_input_fn, shape, dtype=export_dtype)
    else:
      input_receiver_fn = export.build_tensor_serving_input_receiver_fn(
          shape, batch_size=flags_obj.batch_size, dtype=export_dtype)
    classifier.export_savedmodel(flags_obj.export_dir, input_receiver_fn,
                                 strip_default_attrs=True)
  return eval_results

def define_resnet_flags(resnet_size_choices=None):
  """Add flags and validators for ResNet."""
  flags_core.define_base()
  flags_core.define_performance(num_parallel_calls=False,
                                tf_gpu_thread_mode=True,
                                datasets_num_private_threads=True,
                                datasets_num_parallel_batches=True)
  flags_core.define_image()
  flags_core.define_benchmark()
  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_enum(
      name='resnet_version', short_name='rv', default='1',
      enum_values=['1', '2'],
      help=flags_core.help_wrap(
          'Version of ResNet. (1 or 2) See README.md for details.'))
  flags.DEFINE_bool(
      name='fine_tune', short_name='ft', default=False,
      help=flags_core.help_wrap(
          'If True do not train any parameters except for the final layer.'))
  flags.DEFINE_string(
      name='pretrained_model_checkpoint_path', short_name='pmcp', default=None,
      help=flags_core.help_wrap(
          'If not None initialize all the network except the final layer with '
          'these values'))
  flags.DEFINE_boolean(
      name='eval_only', default=False,
      help=flags_core.help_wrap('Skip training and only perform evaluation on '
                                'the latest checkpoint.'))
  flags.DEFINE_boolean(
      name='image_bytes_as_serving_input', default=False,
      help=flags_core.help_wrap(
          'If True exports savedmodel with serving signature that accepts '
          'JPEG image bytes instead of a fixed size [HxWxC] tensor that '
          'represents the image. The former is easier to use for serving at '
          'the expense of image resize/cropping being done as part of model '
          'inference. Note, this flag only applies to ImageNet and cannot '
          'be used for CIFAR.'))
  flags.DEFINE_boolean(
      name='turn_off_distribution_strategy', default=False,
      help=flags_core.help_wrap('Set to True to not use distribution '
                                'strategies.'))
  choice_kwargs = dict(
      name='resnet_size', short_name='rs', default='50',
      help=flags_core.help_wrap('The size of the ResNet model to use.'))

  if resnet_size_choices is None:
    flags.DEFINE_string(**choice_kwargs)
  else:
    flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs)
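
# Example (illustrative sketch): a dataset-specific main module would typically
# define the flags, then hand the parsed flags plus its own model_fn and
# input_fn to resnet_main. The names my_model_fn, my_input_fn and 'my_dataset'
# are hypothetical, and `from absl import app as absl_app` is assumed.
#
#   def main(_):
#     with logger.benchmark_context(flags.FLAGS):
#       resnet_main(flags.FLAGS, my_model_fn, my_input_fn, 'my_dataset',
#                   shape=[224, 224, 3])
#
#   if __name__ == '__main__':
#     define_resnet_flags()
#     absl_app.run(main)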