# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains utility and supporting functions for ResNet.

This module contains ResNet code which does not directly build layers. This
includes dataset management, hyperparameter and optimizer code, and argument
parsing. Code for defining the ResNet layers can be found in resnet_model.py.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import math
import os

# pylint: disable=g-bad-import-order
from absl import flags
import tensorflow as tf

from official.resnet import resnet_model
from official.utils.flags import core as flags_core
from official.utils.export import export
from official.utils.logs import hooks_helper
from official.utils.logs import logger
from official.utils.misc import distribution_utils
from official.utils.misc import model_helpers
# pylint: enable=g-bad-import-order


################################################################################
# Functions for input processing.
################################################################################
def process_record_dataset(dataset, is_training, batch_size, shuffle_buffer,
                           parse_record_fn, num_epochs=1, num_gpus=None,
                           examples_per_epoch=None):
  """Given a Dataset with raw records, return an iterator over the records.

  Args:
    dataset: A Dataset representing raw records
    is_training: A boolean denoting whether the input is for training.
    batch_size: The number of samples per batch.
    shuffle_buffer: The buffer size to use when shuffling records. A larger
      value results in better randomness, but smaller values reduce startup
      time and use less memory.
    parse_record_fn: A function that takes a raw record and returns the
      corresponding (image, label) pair.
    num_epochs: The number of epochs to repeat the dataset.
    num_gpus: The number of gpus used for training.
    examples_per_epoch: The number of examples in an epoch.

  Returns:
    Dataset of (image, label) pairs ready for iteration.
  """

  # We prefetch a batch at a time. This can help smooth out the time taken to
  # load input files as we go through shuffling and processing.
  dataset = dataset.prefetch(buffer_size=batch_size)
  if is_training:
    # Shuffle the records. Note that we shuffle before repeating to ensure
    # that the shuffling respects epoch boundaries.
    dataset = dataset.shuffle(buffer_size=shuffle_buffer)

  # If we are training over multiple epochs before evaluating, repeat the
  # dataset for the appropriate number of epochs.
  dataset = dataset.repeat(num_epochs)

  if is_training and num_gpus and examples_per_epoch:
    total_examples = num_epochs * examples_per_epoch
    # Force the number of batches to be divisible by the number of devices.
    # This prevents some devices from receiving batches while others do not,
    # which can lead to a lockup. This case will soon be handled directly by
    # distribution strategies, at which point this .take() operation will no
    # longer be needed.
    total_batches = total_examples // batch_size // num_gpus * num_gpus
    dataset = dataset.take(total_batches * batch_size)

  # Parse the raw records into images and labels. Testing has shown that
  # setting num_parallel_batches > 1 produces no improvement in throughput,
  # since batch_size is almost always much greater than the number of CPU
  # cores.
  dataset = dataset.apply(
      tf.contrib.data.map_and_batch(
          lambda value: parse_record_fn(value, is_training),
          batch_size=batch_size,
          num_parallel_batches=1,
          drop_remainder=False))

  # Operations between the final prefetch and the get_next call to the iterator
  # will happen synchronously during run time. We prefetch here again to
  # background all of the above processing work and keep it out of the
  # critical training path. Setting buffer_size to tf.contrib.data.AUTOTUNE
  # allows DistributionStrategies to adjust how many batches to fetch based
  # on how many devices are present.
  dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)

  return dataset
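
# A minimal usage sketch (the raw dataset and parse function below are
# hypothetical placeholders, not part of this module):
#   raw_dataset = tf.data.TFRecordDataset(['train.tfrecords'])
#   dataset = process_record_dataset(
#       raw_dataset, is_training=True, batch_size=128, shuffle_buffer=10000,
#       parse_record_fn=my_parse_record_fn, num_epochs=1)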


def get_synth_input_fn(height, width, num_channels, num_classes):
  """Returns an input function that returns a dataset with zeroes.

  This is useful in debugging input pipeline performance, as it removes all
  elements of file reading and image preprocessing.

  Args:
    height: Integer height that will be used to create a fake image tensor.
    width: Integer width that will be used to create a fake image tensor.
    num_channels: Integer depth that will be used to create a fake image tensor.
    num_classes: Number of classes that should be represented in the fake
      labels tensor.

  Returns:
    An input_fn that can be used in place of a real one to return a dataset
    that can be used for iteration.
  """
  def input_fn(is_training, data_dir, batch_size, *args, **kwargs):  # pylint: disable=unused-argument
    return model_helpers.generate_synthetic_data(
        input_shape=tf.TensorShape([batch_size, height, width, num_channels]),
        input_dtype=tf.float32,
        label_shape=tf.TensorShape([batch_size]),
        label_dtype=tf.int32)

  return input_fn
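
# Note: this synthetic input_fn mirrors the signature of the real dataset input
# functions, so callers can swap it in (for example when the use_synthetic_data
# flag is set) without touching the rest of the training loop.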


################################################################################
# Functions for running training/eval/validation loops for the model.
################################################################################
def learning_rate_with_decay(
    batch_size, batch_denom, num_images, boundary_epochs, decay_rates,
    base_lr=0.1, warmup=False):
  """Get a learning rate that decays step-wise as training progresses.

  Args:
    batch_size: the number of examples processed in each training batch.
    batch_denom: this value will be used to scale the base learning rate.
      `0.1 * batch size` is divided by this number, such that when
      batch_denom == batch_size, the initial learning rate will be 0.1.
    num_images: total number of images that will be used for training.
    boundary_epochs: list of ints representing the epochs at which we
      decay the learning rate.
    decay_rates: list of floats representing the decay rates to be used
      for scaling the learning rate. It should have one more element
      than `boundary_epochs`, and all elements should have the same type.
    base_lr: Initial learning rate scaled based on batch_denom.
    warmup: Run a 5 epoch warmup to the initial lr.
  Returns:
    Returns a function that takes a single argument - the number of batches
    trained so far (global_step) - and returns the learning rate to be used
    for training the next batch.
  """
  initial_learning_rate = base_lr * batch_size / batch_denom
  batches_per_epoch = num_images / batch_size

  # Reduce the learning rate at certain epochs.
  # CIFAR-10: divide by 10 at epoch 100, 150, and 200
  # ImageNet: divide by 10 at epoch 30, 60, 80, and 90
  boundaries = [int(batches_per_epoch * epoch) for epoch in boundary_epochs]
  vals = [initial_learning_rate * decay for decay in decay_rates]

  def learning_rate_fn(global_step):
    """Builds scaled learning rate function with 5 epoch warm up."""
    lr = tf.train.piecewise_constant(global_step, boundaries, vals)
    if warmup:
      warmup_steps = int(batches_per_epoch * 5)
      warmup_lr = (
          initial_learning_rate * tf.cast(global_step, tf.float32) / tf.cast(
              warmup_steps, tf.float32))
      return tf.cond(global_step < warmup_steps, lambda: warmup_lr, lambda: lr)
    return lr

  return learning_rate_fn
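
# A minimal usage sketch (the hyperparameter values below are illustrative
# ImageNet-style settings, not defined by this module):
#   learning_rate_fn = learning_rate_with_decay(
#       batch_size=256, batch_denom=256, num_images=1281167,
#       boundary_epochs=[30, 60, 80, 90],
#       decay_rates=[1, 0.1, 0.01, 1e-3, 1e-4])
#   learning_rate = learning_rate_fn(tf.train.get_or_create_global_step())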


def resnet_model_fn(features, labels, mode, model_class,
                    resnet_size, weight_decay, learning_rate_fn, momentum,
                    data_format, resnet_version, loss_scale,
                    loss_filter_fn=None, dtype=resnet_model.DEFAULT_DTYPE,
                    fine_tune=False):
  """Shared functionality for different resnet model_fns.

  Initializes the ResnetModel representing the model layers
  and uses that model to build the necessary EstimatorSpecs for
  the `mode` in question. For training, this means building losses,
  the optimizer, and the train op that get passed into the EstimatorSpec.
  For evaluation and prediction, the EstimatorSpec is returned without
  a train op, but with the necessary parameters for the given mode.

  Args:
    features: tensor representing input images
    labels: tensor representing class labels for all input images
    mode: current estimator mode; should be one of
      `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT`
    model_class: a class representing a TensorFlow model that has a __call__
      function. We assume here that this is a subclass of ResnetModel.
    resnet_size: A single integer for the size of the ResNet model.
    weight_decay: weight decay loss rate used to regularize learned variables.
    learning_rate_fn: function that returns the current learning rate given
      the current global_step
    momentum: momentum term used for optimization
    data_format: Input format ('channels_last', 'channels_first', or None).
      If set to None, the format is dependent on whether a GPU is available.
    resnet_version: Integer representing which version of the ResNet network to
      use. See README for details. Valid values: [1, 2]
    loss_scale: The factor to scale the loss for numerical stability. A detailed
      summary is present in the arg parser help text.
    loss_filter_fn: function that takes a string variable name and returns
      True if the var should be included in loss calculation, and False
      otherwise. If None, batch_normalization variables will be excluded
      from the loss.
    dtype: the TensorFlow dtype to use for calculations.
    fine_tune: If True, only train the dense (final) layers.

  Returns:
    EstimatorSpec parameterized according to the input params and the
    current mode.
  """

  # Generate a summary node for the images
  tf.summary.image('images', features, max_outputs=6)

  features = tf.cast(features, dtype)

  model = model_class(resnet_size, data_format, resnet_version=resnet_version,
                      dtype=dtype)

  logits = model(features, mode == tf.estimator.ModeKeys.TRAIN)

  # This acts as a no-op if the logits are already in fp32 (provided logits are
  # not a SparseTensor). If dtype is low precision, logits must be cast to
  # fp32 for numerical stability.
  logits = tf.cast(logits, tf.float32)

  predictions = {
      'classes': tf.argmax(logits, axis=1),
      'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
  }

  if mode == tf.estimator.ModeKeys.PREDICT:
    # Return the predictions and the specification for serving a SavedModel
    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        export_outputs={
            'predict': tf.estimator.export.PredictOutput(predictions)
        })

  # Calculate loss, which includes softmax cross entropy and L2 regularization.
  cross_entropy = tf.losses.sparse_softmax_cross_entropy(
      logits=logits, labels=labels)

  # Create a tensor named cross_entropy for logging purposes.
  tf.identity(cross_entropy, name='cross_entropy')
  tf.summary.scalar('cross_entropy', cross_entropy)

  # If no loss_filter_fn is passed, assume we want the default behavior,
  # which is that batch_normalization variables are excluded from loss.
  def exclude_batch_norm(name):
    return 'batch_normalization' not in name
  loss_filter_fn = loss_filter_fn or exclude_batch_norm

  # Add weight decay to the loss.
  l2_loss = weight_decay * tf.add_n(
      # loss is computed using fp32 for numerical stability.
      [tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()
       if loss_filter_fn(v.name)])
  tf.summary.scalar('l2_loss', l2_loss)
  loss = cross_entropy + l2_loss

  if mode == tf.estimator.ModeKeys.TRAIN:
    global_step = tf.train.get_or_create_global_step()

    learning_rate = learning_rate_fn(global_step)

    # Create a tensor named learning_rate for logging purposes
    tf.identity(learning_rate, name='learning_rate')
    tf.summary.scalar('learning_rate', learning_rate)

    optimizer = tf.train.MomentumOptimizer(
        learning_rate=learning_rate,
        momentum=momentum
    )

    def _dense_grad_filter(gvs):
      """Only apply gradient updates to the final layer.

      This function is used for fine tuning.

      Args:
        gvs: list of tuples with gradients and variable info

      Returns:
        filtered gradients so that only the dense layer remains
      """
      return [(g, v) for g, v in gvs if 'dense' in v.name]

    if loss_scale != 1:
      # When computing fp16 gradients, often intermediate tensor values are
      # so small, they underflow to 0. To avoid this, we multiply the loss by
      # loss_scale to make these tensor values loss_scale times bigger.
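      # Illustrative example (numbers chosen only for explanation): with
      # loss_scale=128, an intermediate gradient of 1e-9 that would underflow
      # to 0 in fp16 is instead computed as 1.28e-7; the gradients are divided
      # back by loss_scale below before being applied.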
      scaled_grad_vars = optimizer.compute_gradients(loss * loss_scale)

      if fine_tune:
        scaled_grad_vars = _dense_grad_filter(scaled_grad_vars)

      # Once the gradient computation is complete we can scale the gradients
      # back to the correct scale before passing them to the optimizer.
      unscaled_grad_vars = [(grad / loss_scale, var)
                            for grad, var in scaled_grad_vars]
      minimize_op = optimizer.apply_gradients(unscaled_grad_vars, global_step)
    else:
      grad_vars = optimizer.compute_gradients(loss)
      if fine_tune:
        grad_vars = _dense_grad_filter(grad_vars)
      minimize_op = optimizer.apply_gradients(grad_vars, global_step)

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    train_op = tf.group(minimize_op, update_ops)
  else:
    train_op = None

  accuracy = tf.metrics.accuracy(labels, predictions['classes'])
  accuracy_top_5 = tf.metrics.mean(tf.nn.in_top_k(predictions=logits,
                                                  targets=labels,
                                                  k=5,
                                                  name='top_5_op'))
  metrics = {'accuracy': accuracy,
             'accuracy_top_5': accuracy_top_5}

  # Create a tensor named train_accuracy for logging purposes
  tf.identity(accuracy[1], name='train_accuracy')
  tf.identity(accuracy_top_5[1], name='train_accuracy_top_5')
  tf.summary.scalar('train_accuracy', accuracy[1])
  tf.summary.scalar('train_accuracy_top_5', accuracy_top_5[1])

  return tf.estimator.EstimatorSpec(
      mode=mode,
      predictions=predictions,
      loss=loss,
      train_op=train_op,
      eval_metric_ops=metrics)


def resnet_main(
    flags_obj, model_function, input_function, dataset_name, shape=None):
  """Shared main loop for ResNet Models.

  Args:
    flags_obj: An object containing parsed flags. See define_resnet_flags()
      for details.
    model_function: the function that instantiates the Model and builds the
      ops for train/eval. This will be passed directly into the estimator.
    input_function: the function that processes the dataset and returns a
      dataset that the estimator can train on. This will be wrapped with
      all the relevant flags for running and passed to estimator.
    dataset_name: the name of the dataset for training and evaluation. This is
      used for logging purposes.
    shape: list of ints representing the shape of the images used for training.
      This is only used if flags_obj.export_dir is passed.
  """

  model_helpers.apply_clean(flags.FLAGS)

  # Using the Winograd non-fused algorithms provides a small performance boost.
  os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1'

  # Create session config based on values of inter_op_parallelism_threads and
  # intra_op_parallelism_threads. Note that we default to having
  # allow_soft_placement = True, which is required for multi-GPU and not
  # harmful for other modes.
  session_config = tf.ConfigProto(
      inter_op_parallelism_threads=flags_obj.inter_op_parallelism_threads,
      intra_op_parallelism_threads=flags_obj.intra_op_parallelism_threads,
      allow_soft_placement=True)

  distribution_strategy = distribution_utils.get_distribution_strategy(
      flags_core.get_num_gpus(flags_obj), flags_obj.all_reduce_alg)

  run_config = tf.estimator.RunConfig(
      train_distribute=distribution_strategy, session_config=session_config)

  # Initialize our model with all but the dense layer from the pre-trained
  # ResNet. The '^(?!.*dense)' regex warm-starts every variable whose name
  # does not contain 'dense'.
  if flags_obj.pretrained_model_checkpoint_path is not None:
    warm_start_settings = tf.estimator.WarmStartSettings(
        flags_obj.pretrained_model_checkpoint_path,
        vars_to_warm_start='^(?!.*dense)')
  else:
    warm_start_settings = None

  classifier = tf.estimator.Estimator(
      model_fn=model_function, model_dir=flags_obj.model_dir, config=run_config,
      warm_start_from=warm_start_settings, params={
          'resnet_size': int(flags_obj.resnet_size),
          'data_format': flags_obj.data_format,
          'batch_size': flags_obj.batch_size,
          'resnet_version': int(flags_obj.resnet_version),
          'loss_scale': flags_core.get_loss_scale(flags_obj),
          'dtype': flags_core.get_tf_dtype(flags_obj),
          'fine_tune': flags_obj.fine_tune
      })

  run_params = {
      'batch_size': flags_obj.batch_size,
      'dtype': flags_core.get_tf_dtype(flags_obj),
      'resnet_size': flags_obj.resnet_size,
      'resnet_version': flags_obj.resnet_version,
      'synthetic_data': flags_obj.use_synthetic_data,
      'train_epochs': flags_obj.train_epochs,
  }
  if flags_obj.use_synthetic_data:
    dataset_name = dataset_name + '-synthetic'

  benchmark_logger = logger.get_benchmark_logger()
  benchmark_logger.log_run_info('resnet', dataset_name, run_params,
                                test_id=flags_obj.benchmark_test_id)

  train_hooks = hooks_helper.get_train_hooks(
      flags_obj.hooks,
      model_dir=flags_obj.model_dir,
      batch_size=flags_obj.batch_size)

  def input_fn_train(num_epochs):
    return input_function(
        is_training=True, data_dir=flags_obj.data_dir,
        batch_size=distribution_utils.per_device_batch_size(
            flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
        num_epochs=num_epochs,
        num_gpus=flags_core.get_num_gpus(flags_obj))

  def input_fn_eval():
    return input_function(
        is_training=False, data_dir=flags_obj.data_dir,
        batch_size=distribution_utils.per_device_batch_size(
            flags_obj.batch_size, flags_core.get_num_gpus(flags_obj)),
        num_epochs=1)

  if flags_obj.eval_only or not flags_obj.train_epochs:
    # If --eval_only is set, perform a single loop with zero train epochs.
    schedule, n_loops = [0], 1
  else:
    # Compute the number of times to loop while training. All but the last
    # pass will train for `epochs_between_evals` epochs, while the last will
    # train for the number needed to reach `training_epochs`. For instance if
    #   train_epochs = 25 and epochs_between_evals = 10
    # schedule will be set to [10, 10, 5]. That is to say, the loop will:
    #   Train for 10 epochs and then evaluate.
    #   Train for another 10 epochs and then evaluate.
    #   Train for a final 5 epochs (to reach 25 epochs) and then evaluate.
    n_loops = math.ceil(flags_obj.train_epochs / flags_obj.epochs_between_evals)
    schedule = [flags_obj.epochs_between_evals for _ in range(int(n_loops))]
    schedule[-1] = flags_obj.train_epochs - sum(schedule[:-1])  # over counting.

  for cycle_index, num_train_epochs in enumerate(schedule):
    tf.logging.info('Starting cycle: %d/%d', cycle_index, int(n_loops))

    if num_train_epochs:
      classifier.train(input_fn=lambda: input_fn_train(num_train_epochs),
                       hooks=train_hooks, max_steps=flags_obj.max_train_steps)

    tf.logging.info('Starting to evaluate.')

    # flags_obj.max_train_steps is generally associated with testing and
    # profiling. As a result it is frequently used with synthetic data, which
    # will iterate forever. Passing steps=flags_obj.max_train_steps allows the
    # eval (which is generally unimportant in those circumstances) to terminate.
    # Note that eval will run for max_train_steps each loop, regardless of the
    # global_step count.
    eval_results = classifier.evaluate(input_fn=input_fn_eval,
                                       steps=flags_obj.max_train_steps)

    benchmark_logger.log_evaluation_result(eval_results)

    if model_helpers.past_stop_threshold(
        flags_obj.stop_threshold, eval_results['accuracy']):
      break

  if flags_obj.export_dir is not None:
    # Exports a saved model for the given classifier.
    input_receiver_fn = export.build_tensor_serving_input_receiver_fn(
        shape, batch_size=flags_obj.batch_size)
    classifier.export_savedmodel(flags_obj.export_dir, input_receiver_fn)


def define_resnet_flags(resnet_size_choices=None):
  """Add flags and validators for ResNet."""
  flags_core.define_base()
  flags_core.define_performance(num_parallel_calls=False)
  flags_core.define_image()
  flags_core.define_benchmark()
  flags.adopt_module_key_flags(flags_core)

  flags.DEFINE_enum(
      name='resnet_version', short_name='rv', default='2',
      enum_values=['1', '2'],
      help=flags_core.help_wrap(
          'Version of ResNet. (1 or 2) See README.md for details.'))
  flags.DEFINE_bool(
      name='fine_tune', short_name='ft', default=False,
      help=flags_core.help_wrap(
          'If True, do not train any parameters except for the final layer.'))
  flags.DEFINE_string(
      name='pretrained_model_checkpoint_path', short_name='pmcp', default=None,
      help=flags_core.help_wrap(
          'If not None, initialize all of the network except the final layer '
          'with these values.'))
  flags.DEFINE_boolean(
      name='eval_only', default=False,
      help=flags_core.help_wrap('Skip training and only perform evaluation on '
                                'the latest checkpoint.'))

  choice_kwargs = dict(
      name='resnet_size', short_name='rs', default='50',
      help=flags_core.help_wrap('The size of the ResNet model to use.'))

  if resnet_size_choices is None:
    flags.DEFINE_string(**choice_kwargs)
  else:
    flags.DEFINE_enum(enum_values=resnet_size_choices, **choice_kwargs)
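

# A minimal sketch of how a driver script might wire these pieces together
# (my_model_fn, my_input_fn, and the shape below are placeholders, not part of
# this module):
#   define_resnet_flags()
#   flags_obj = flags.FLAGS
#   resnet_main(flags_obj, model_function=my_model_fn,
#               input_function=my_input_fn, dataset_name='my_dataset',
#               shape=[32, 32, 3])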