"web/app/vscode:/vscode.git/clone" did not exist on "4c2b4589ac8799b045851252e8603210e46b1a62"
distributed_executor.py 29.5 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom training loop for running TensorFlow 2.0 models."""

from __future__ import absolute_import
from __future__ import division
# from __future__ import google_type_annotations
from __future__ import print_function

import os

from absl import flags
from absl import logging

import numpy as np
import tensorflow as tf

# pylint: disable=unused-import,g-import-not-at-top,redefined-outer-name,reimported
from typing import Optional, Dict, List, Text, Callable, Union, Iterator, Any
from official.modeling.hyperparams import params_dict
from official.utils import hyperparams_flags
from official.common import distribute_utils
from official.utils.misc import keras_utils

FLAGS = flags.FLAGS

strategy_flags_dict = hyperparams_flags.strategy_flags_dict
hparam_flags_dict = hyperparams_flags.hparam_flags_dict


def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):
  """Saves model to model_dir with provided checkpoint prefix."""

  checkpoint_path = os.path.join(model_dir, checkpoint_prefix)
  saved_path = checkpoint.save(checkpoint_path)
  logging.info('Saving model as TF checkpoint: %s', saved_path)


def _steps_to_run(current_step, total_steps, steps_per_loop):
  """Calculates steps to run on device."""
  if steps_per_loop <= 0:
    raise ValueError('steps_per_loop should be a positive integer.')
  return min(total_steps - current_step, steps_per_loop)
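
# A rough sketch of how `_steps_to_run` paces the training loop (the numbers
# below are illustrative):
#
#   _steps_to_run(current_step=0, total_steps=1000, steps_per_loop=100)  # 100
#   _steps_to_run(current_step=950, total_steps=1000, steps_per_loop=100)  # 50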


def _no_metric():
  return None


def metrics_as_dict(metric):
  """Puts input metric(s) into a list.

  Args:
    metric: metric(s) to be put into the dictionary. `metric` may be a single
      tf.keras.metrics.Metric object, a list or dict of such objects, or any
      object that exposes the required metric methods.

  Returns:
    A dictionary of valid metrics.
  """
  if isinstance(metric, tf.keras.metrics.Metric):
    metrics = {metric.name: metric}
  elif isinstance(metric, list):
    metrics = {m.name: m for m in metric}
  elif isinstance(metric, dict):
    metrics = metric
  elif not metric:
    return {}
  else:
    metrics = {'metric': metric}
  return metrics
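
# Example behavior of `metrics_as_dict` (illustrative; `acc` is a hypothetical
# metric object):
#
#   acc = tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy')
#   metrics_as_dict(acc)    # -> {'accuracy': acc}
#   metrics_as_dict([acc])  # -> {'accuracy': acc}
#   metrics_as_dict(None)   # -> {}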


def metric_results(metric):
  """Collects results from the given metric(s)."""
  metrics = metrics_as_dict(metric)
  metric_result = {
      name: m.result().numpy().astype(float) for name, m in metrics.items()
  }
  return metric_result


def reset_states(metric):
  """Resets states of the given metric(s)."""
  metrics = metrics_as_dict(metric)
  for m in metrics.values():
    m.reset_states()


class SummaryWriter(object):
  """Simple SummaryWriter for writing dictionary of metrics.

  Attributes:
    writer: The underlying tf.summary.SummaryWriter.
  """

  def __init__(self, model_dir: Text, name: Text):
    """Inits SummaryWriter with paths.

    Args:
      model_dir: the model folder path.
      name: the summary subfolder name.
    """
    self.writer = tf.summary.create_file_writer(os.path.join(model_dir, name))

  def __call__(self, metrics: Union[Dict[Text, float], float], step: int):
    """Write metrics to summary with the given writer.

    Args:
      metrics: a dictionary of metric values, or a single scalar value (a
        dictionary is preferred).
      step: integer. The training step.
    """
    if not isinstance(metrics, dict):
      # Support scalar metric without name.
      logging.warning('Summary writer prefers metrics as a dictionary.')
      metrics = {'metric': metrics}

    with self.writer.as_default():
      for k, v in metrics.items():
        tf.summary.scalar(k, v, step=step)
      self.writer.flush()
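
# Example usage of SummaryWriter (illustrative; '/tmp/model' is a hypothetical
# model_dir):
#
#   writer = SummaryWriter('/tmp/model', 'eval')
#   writer({'loss': 0.7, 'accuracy': 0.9}, step=100)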
133
134
135


class DistributedExecutor(object):
  """Interface to train and eval models with tf.distribute.Strategy."""

  def __init__(self, strategy, params, model_fn, loss_fn, is_multi_host=False):
    """Constructor.

    Args:
      strategy: an instance of tf.distribute.Strategy.
      params: Model configuration needed to run distribution strategy.
      model_fn: Keras model function. Signature:
        (params: ParamsDict) -> tf.keras.models.Model.
      loss_fn: loss function. Signature:
        (y_true: Tensor, y_pred: Tensor) -> Tensor
      is_multi_host: Set to True when using multi hosts for training, like multi
        worker GPU or TPU pod (slice). Otherwise, False.
    """

    self._params = params
    self._model_fn = model_fn
    self._loss_fn = loss_fn
    self._strategy = strategy
    self._checkpoint_name = 'ctl_step_{step}.ckpt'
    self._is_multi_host = is_multi_host
    self.train_summary_writer = None
    self.eval_summary_writer = None
    self.global_train_step = None

  @property
  def checkpoint_name(self):
    """Returns default checkpoint name."""
    return self._checkpoint_name

  @checkpoint_name.setter
  def checkpoint_name(self, name):
    """Sets default summary writer for the current thread."""
    self._checkpoint_name = name

  def loss_fn(self):
    return self._loss_fn()

  def model_fn(self, params):
    return self._model_fn(params)

  def _save_config(self, model_dir):
    """Save parameters to config files if model_dir is defined."""

    if model_dir:
      logging.info('Saving config to model_dir %s.', model_dir)
      if not tf.io.gfile.exists(model_dir):
        tf.io.gfile.makedirs(model_dir)
      self._params.lock()
      params_dict.save_params_dict_to_yaml(self._params,
                                           model_dir + '/params.yaml')
    else:
      logging.warning('model_dir is empty, skipping saving the config.')

  def _get_input_iterator(
      self, input_fn: Callable[..., tf.data.Dataset],
      strategy: tf.distribute.Strategy) -> Optional[Iterator[Any]]:
    """Returns distributed dataset iterator.

    Args:
      input_fn: (params: dict) -> tf.data.Dataset.
      strategy: an instance of tf.distribute.Strategy.

    Returns:
      An iterator that yields input tensors.
    """

    if input_fn is None:
      return None
    # When training with multiple TPU workers, the dataset needs to be cloned
    # across workers. Since a Dataset instance cannot be cloned in eager mode,
    # we instead pass a callable that returns a dataset.
    if self._is_multi_host:
      return iter(strategy.distribute_datasets_from_function(input_fn))
211
    else:
      input_data = input_fn()
      return iter(strategy.experimental_distribute_dataset(input_data))
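
  # The multi-host path hands `input_fn` to
  # `strategy.distribute_datasets_from_function`, which calls it with a
  # `tf.distribute.InputContext`; the single-host path calls it with no
  # arguments. A minimal sketch of a compatible input_fn (`features` and
  # `labels` are hypothetical):
  #
  #   def my_input_fn(input_context=None):
  #     dataset = tf.data.Dataset.from_tensor_slices((features, labels))
  #     if input_context:
  #       dataset = dataset.shard(input_context.num_input_pipelines,
  #                               input_context.input_pipeline_id)
  #     return dataset.repeat().batch(32)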

  def _create_replicated_step(self,
                              strategy,
                              model,
                              loss_fn,
                              optimizer,
                              metric=None):
    """Creates a single training step.

    Args:
      strategy: an instance of tf.distribute.Strategy.
      model: (Tensor, bool) -> Tensor. model function.
      loss_fn: (y_true: Tensor, y_pred: Tensor) -> Tensor.
      optimizer: tf.keras.optimizers.Optimizer.
      metric: tf.keras.metrics.Metric subclass.

    Returns:
      The training step callable.
    """
    metrics = metrics_as_dict(metric)

    def _replicated_step(inputs):
      """Replicated training step."""
      inputs, labels = inputs

      with tf.GradientTape() as tape:
        outputs = model(inputs, training=True)
        prediction_loss = loss_fn(labels, outputs)
        loss = tf.reduce_mean(prediction_loss)
        loss = loss / strategy.num_replicas_in_sync
        for m in metrics.values():
          m.update_state(labels, outputs)

      grads = tape.gradient(loss, model.trainable_variables)
      optimizer.apply_gradients(zip(grads, model.trainable_variables))
      return loss

    return _replicated_step
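
  # Note on the loss scaling above: each replica divides its local mean loss
  # by `num_replicas_in_sync`, and the strategy sums gradients across
  # replicas, so the effective loss is the global mean. With two replicas and
  # per-replica losses l0 and l1: l0/2 + l1/2 == mean(l0, l1).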

  def _create_train_step(self,
                         strategy,
                         model,
                         loss_fn,
                         optimizer,
                         metric=None):
    """Creates a distributed training step.

    Args:
      strategy: an instance of tf.distribute.Strategy.
      model: (Tensor, bool) -> Tensor. model function.
      loss_fn: (y_true: Tensor, y_pred: Tensor) -> Tensor.
      optimizer: tf.keras.optimizers.Optimizer.
      metric: tf.keras.metrics.Metric subclass.

    Returns:
      The training step callable.
    """
    replicated_step = self._create_replicated_step(strategy, model, loss_fn,
                                                   optimizer, metric)

    @tf.function
    def train_step(iterator, num_steps):
      """Performs a distributed training step.

      Args:
        iterator: an iterator that yields input tensors.
        num_steps: the number of steps in the loop.

      Returns:
        The loss tensor.
      """
      if not isinstance(num_steps, tf.Tensor):
        raise ValueError('`num_steps` should be a Tensor. Passing a Python '
                         'object may cause retracing.')

      per_replica_losses = strategy.run(replicated_step, args=(next(iterator),))
      for _ in tf.range(num_steps - 1):
        per_replica_losses = strategy.run(
            replicated_step, args=(next(iterator),))

      # For reporting, we return the mean of losses.
      losses = tf.nest.map_structure(
          lambda x: strategy.reduce(tf.distribute.ReduceOp.MEAN, x, axis=None),
          per_replica_losses)
      return losses

    return train_step
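
  # `train_step` takes `num_steps` as a Tensor so the tf.function is not
  # retraced for every new Python int; an illustrative call:
  #
  #   loss = train_step(train_iterator,
  #                     tf.convert_to_tensor(100, dtype=tf.int32))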

  def _create_test_step(self, strategy, model, metric):
    """Creates a distributed test step."""
    metrics = metrics_as_dict(metric)

    @tf.function
    def test_step(iterator):
      """Calculates evaluation metrics on distributed devices."""
      if not metric:
        logging.info('Skip test_step because metric is None (%s)', metric)
        return None, None

      def _test_step_fn(inputs):
        """Replicated accuracy calculation."""
        inputs, labels = inputs
        model_outputs = model(inputs, training=False)
        for m in metrics.values():
          m.update_state(labels, model_outputs)
        return labels, model_outputs

      return strategy.run(_test_step_fn, args=(next(iterator),))

    return test_step

  def train(self,
            train_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset],
            eval_input_fn: Callable[[params_dict.ParamsDict],
                                    tf.data.Dataset] = None,
            model_dir: Text = None,
            total_steps: int = 1,
            iterations_per_loop: int = 1,
            train_metric_fn: Callable[[], Any] = None,
            eval_metric_fn: Callable[[], Any] = None,
            summary_writer_fn: Callable[[Text, Text],
                                        SummaryWriter] = SummaryWriter,
            init_checkpoint: Callable[[tf.keras.Model], Any] = None,
            custom_callbacks: List[tf.keras.callbacks.Callback] = None,
            continuous_eval: bool = False,
            save_config: bool = True):
    """Runs distributed training.

    Args:
      train_input_fn: (params: dict) -> tf.data.Dataset training data input
        function.
      eval_input_fn: (Optional) same type as train_input_fn. If not None, will
        trigger evaluating metric on eval data. If None, will not run the eval
        step.
      model_dir: the folder path for model checkpoints.
      total_steps: total training steps.
      iterations_per_loop: train steps per loop. After each loop, this job will
        update metrics like loss and save checkpoint.
      train_metric_fn: metric_fn for evaluation in train_step.
      eval_metric_fn: metric_fn for evaluation in test_step.
      summary_writer_fn: function to create summary writer.
      init_checkpoint: function to load checkpoint.
      custom_callbacks: A list of Keras Callback objects to run during
        training. More specifically, the `on_batch_begin()` and
        `on_batch_end()` methods are invoked during training.
      continuous_eval: If `True`, will continuously run evaluation on every
        available checkpoint. If `False`, will do the evaluation once after the
        final step.
      save_config: bool. Whether to save params to model_dir.

    Returns:
      The training loss and eval metrics.
    """
    assert train_input_fn is not None
    if train_metric_fn and not callable(train_metric_fn):
      raise ValueError('if `train_metric_fn` is specified, '
                       'train_metric_fn must be a callable.')
    if eval_metric_fn and not callable(eval_metric_fn):
      raise ValueError('if `eval_metric_fn` is specified, '
                       'eval_metric_fn must be a callable.')
    train_metric_fn = train_metric_fn or _no_metric
    eval_metric_fn = eval_metric_fn or _no_metric

    if custom_callbacks and iterations_per_loop != 1:
      logging.warning(
          'It is semantically wrong to run callbacks when '
          'iterations_per_loop is not one (%s)', iterations_per_loop)

    custom_callbacks = custom_callbacks or []

    def _run_callbacks_on_batch_begin(batch):
      """Runs custom callbacks at the start of every step."""
      if not custom_callbacks:
        return
      for callback in custom_callbacks:
        if callback:
          callback.on_batch_begin(batch)

    def _run_callbacks_on_batch_end(batch):
      """Runs custom callbacks at the end of every step."""
      if not custom_callbacks:
        return
      for callback in custom_callbacks:
        if callback:
          callback.on_batch_end(batch)

    if save_config:
      self._save_config(model_dir)

    if FLAGS.save_checkpoint_freq:
      save_freq = FLAGS.save_checkpoint_freq
    else:
      save_freq = iterations_per_loop

    params = self._params
    strategy = self._strategy
    # To reduce unnecessary send/receive input pipeline operation, we place
    # input pipeline ops in worker task.
    train_iterator = self._get_input_iterator(train_input_fn, strategy)
    train_loss = None
    train_metric_result = None
    eval_metric_result = None
    tf.keras.backend.set_learning_phase(1)
    with strategy.scope():
      # To correctly place the model weights on accelerators,
      # model and optimizer should be created in scope.
      model = self.model_fn(params.as_dict())
      if not hasattr(model, 'optimizer'):
        raise ValueError('`model_fn` must set an `optimizer` attribute on the '
                         'returned model.')
      optimizer = model.optimizer

      # Training loop starts here.
      checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
      latest_checkpoint_file = tf.train.latest_checkpoint(model_dir)
      initial_step = 0
      if latest_checkpoint_file:
        logging.info(
            'Checkpoint file %s found and restoring from '
            'checkpoint', latest_checkpoint_file)
        checkpoint.restore(latest_checkpoint_file)
        initial_step = optimizer.iterations.numpy()
        logging.info('Loading from checkpoint file completed. Init step %d',
                     initial_step)
      elif init_checkpoint:
        logging.info('Restoring from init checkpoint function')
        init_checkpoint(model)
        logging.info('Loading from init checkpoint file completed')

      current_step = optimizer.iterations.numpy()
      checkpoint_name = self.checkpoint_name

      eval_metric = eval_metric_fn()
      train_metric = train_metric_fn()
      train_summary_writer = summary_writer_fn(model_dir, 'eval_train')
      self.train_summary_writer = train_summary_writer.writer

      test_summary_writer = summary_writer_fn(model_dir, 'eval_test')
      self.eval_summary_writer = test_summary_writer.writer

    # Use training summary writer in TimeHistory if it's in use
    for cb in custom_callbacks:
      if isinstance(cb, keras_utils.TimeHistory):
        cb.summary_writer = self.train_summary_writer

    # Continue training loop.
    train_step = self._create_train_step(
        strategy=strategy,
        model=model,
        loss_fn=self.loss_fn(),
        optimizer=optimizer,
        metric=train_metric)
    test_step = None
    if eval_input_fn and eval_metric:
      self.global_train_step = model.optimizer.iterations
      test_step = self._create_test_step(strategy, model, metric=eval_metric)

    # Step-0 operations
    if current_step == 0 and not latest_checkpoint_file:
      _save_checkpoint(checkpoint, model_dir,
                       checkpoint_name.format(step=current_step))
    if test_step:
      eval_iterator = self._get_input_iterator(eval_input_fn, strategy)
      eval_metric_result = self._run_evaluation(test_step, current_step,
                                                eval_metric, eval_iterator)
      logging.info('Step: %s evaluation metric = %s.', current_step,
                   eval_metric_result)
      test_summary_writer(metrics=eval_metric_result, step=optimizer.iterations)
      reset_states(eval_metric)

    logging.info('Training started')
    last_save_checkpoint_step = current_step
    while current_step < total_steps:

      num_steps = _steps_to_run(current_step, total_steps, iterations_per_loop)
      _run_callbacks_on_batch_begin(current_step)
      train_loss = train_step(train_iterator,
                              tf.convert_to_tensor(num_steps, dtype=tf.int32))
      current_step += num_steps

      train_loss = tf.nest.map_structure(lambda x: x.numpy().astype(float),
                                         train_loss)

      _run_callbacks_on_batch_end(current_step - 1)
      if not isinstance(train_loss, dict):
        train_loss = {'total_loss': train_loss}
      if np.isnan(train_loss['total_loss']):
        raise ValueError('total loss is NaN.')

      if train_metric:
        train_metric_result = metric_results(train_metric)
        train_metric_result.update(train_loss)
      else:
        train_metric_result = train_loss
      if callable(optimizer.lr):
        train_metric_result.update(
            {'learning_rate': optimizer.lr(current_step).numpy()})
      else:
        train_metric_result.update({'learning_rate': optimizer.lr.numpy()})
      logging.info('Train Step: %d/%d  / loss = %s / training metric = %s',
                   current_step, total_steps, train_loss, train_metric_result)

      train_summary_writer(
          metrics=train_metric_result, step=optimizer.iterations)

      # Save model checkpoints and run validation steps every
      # `iterations_per_loop` steps.
      # To avoid repeated model saving, we do not save after the last
      # step of training.
      if save_freq > 0 and current_step < total_steps and (
          current_step - last_save_checkpoint_step) >= save_freq:
        _save_checkpoint(checkpoint, model_dir,
                         checkpoint_name.format(step=current_step))
        last_save_checkpoint_step = current_step

      if continuous_eval and current_step < total_steps and test_step:
        eval_iterator = self._get_input_iterator(eval_input_fn, strategy)
        eval_metric_result = self._run_evaluation(test_step, current_step,
                                                  eval_metric, eval_iterator)
        logging.info('Step: %s evaluation metric = %s.', current_step,
                     eval_metric_result)
        test_summary_writer(
            metrics=eval_metric_result, step=optimizer.iterations)

      # Re-initialize evaluation metrics, except at the last step.
      if eval_metric and current_step < total_steps:
        reset_states(eval_metric)
      if train_metric and current_step < total_steps:
        reset_states(train_metric)

    # Reaches the end of training and saves the last checkpoint.
    if last_save_checkpoint_step < total_steps:
      _save_checkpoint(checkpoint, model_dir,
                       checkpoint_name.format(step=current_step))

    if test_step:
      logging.info('Running final evaluation after training is complete.')
      eval_iterator = self._get_input_iterator(eval_input_fn, strategy)
      eval_metric_result = self._run_evaluation(test_step, current_step,
                                                eval_metric, eval_iterator)
      logging.info('Final evaluation metric = %s.', eval_metric_result)
      test_summary_writer(metrics=eval_metric_result, step=optimizer.iterations)

    self.train_summary_writer.close()
    self.eval_summary_writer.close()

    return train_metric_result, eval_metric_result
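
  # An illustrative end-to-end training call (all names below are
  # hypothetical):
  #
  #   executor = DistributedExecutor(strategy, params, my_model_fn, my_loss_fn)
  #   train_metrics, eval_metrics = executor.train(
  #       train_input_fn=my_train_input_fn,
  #       eval_input_fn=my_eval_input_fn,
  #       model_dir='/tmp/model',
  #       total_steps=1000,
  #       iterations_per_loop=100)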

  def _run_evaluation(self, test_step, current_training_step, metric,
                      test_iterator):
    """Runs validation steps and aggregate metrics."""
    if not test_iterator or not metric:
      logging.warning(
          'Both test_iterator (%s) and metrics (%s) must not be None.',
          test_iterator, metric)
      return None
    logging.info('Running evaluation after step: %s.', current_training_step)
    eval_step = 0
    while True:
      try:
        with tf.experimental.async_scope():
          test_step(test_iterator)
          eval_step += 1
      except (StopIteration, tf.errors.OutOfRangeError):
        tf.experimental.async_clear_error()
        break

    metric_result = metric_results(metric)
    logging.info('Total eval steps: [%d]', eval_step)
    logging.info('At training step: [%r] Validation metric = %r',
                 current_training_step, metric_result)
    return metric_result

  def evaluate_from_model_dir(
      self,
      model_dir: Text,
      eval_input_fn: Callable[[params_dict.ParamsDict], tf.data.Dataset],
      eval_metric_fn: Callable[[], Any],
      total_steps: int = -1,
      eval_timeout: int = None,
      min_eval_interval: int = 180,
      summary_writer_fn: Callable[[Text, Text], SummaryWriter] = SummaryWriter):
    """Runs distributed evaluation on model folder.

    Args:
      model_dir: the folder for storing model checkpoints.
      eval_input_fn: (Optional) same type as train_input_fn. If not None, will
        trigger evaluating metric on eval data. If None, will not run eval step.
      eval_metric_fn: metric_fn for evaluation in test_step.
      total_steps: total training steps. If the current step reaches the
        total_steps, the evaluation loop will stop.
      eval_timeout: The maximum number of seconds to wait between checkpoints.
        If left as None, then the process will wait indefinitely. Used by
        tf.train.checkpoints_iterator.
      min_eval_interval: The minimum number of seconds between yielding
        checkpoints. Used by tf.train.checkpoints_iterator.
      summary_writer_fn: function to create summary writer.

    Returns:
      Eval metrics dictionary of the last checkpoint.
    """

    if not model_dir:
      raise ValueError('model_dir must be set.')

    def terminate_eval():
      logging.info('Terminating eval after %d seconds of no checkpoints',
                   eval_timeout)
      return True

    summary_writer = summary_writer_fn(model_dir, 'eval')
    self.eval_summary_writer = summary_writer.writer

    # Read checkpoints from the given model directory
    # until `eval_timeout` seconds elapses.
    for checkpoint_path in tf.train.checkpoints_iterator(
        model_dir,
        min_interval_secs=min_eval_interval,
        timeout=eval_timeout,
        timeout_fn=terminate_eval):
      eval_metric_result, current_step = self.evaluate_checkpoint(
          checkpoint_path=checkpoint_path,
          eval_input_fn=eval_input_fn,
          eval_metric_fn=eval_metric_fn,
          summary_writer=summary_writer)
      if total_steps > 0 and current_step >= total_steps:
        logging.info('Evaluation finished after training step %d', current_step)
        break
    return eval_metric_result
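
  # An illustrative continuous-evaluation call (names below are hypothetical):
  #
  #   metrics = executor.evaluate_from_model_dir(
  #       model_dir='/tmp/model',
  #       eval_input_fn=my_eval_input_fn,
  #       eval_metric_fn=my_metric_fn,
  #       eval_timeout=3600)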

  def evaluate_checkpoint(self,
                          checkpoint_path: Text,
                          eval_input_fn: Callable[[params_dict.ParamsDict],
                                                  tf.data.Dataset],
                          eval_metric_fn: Callable[[], Any],
                          summary_writer: SummaryWriter = None):
    """Runs distributed evaluation on the one checkpoint.

    Args:
      checkpoint_path: the checkpoint to evaluate.
      eval_input_fn: (Optional) same type as train_input_fn. If not None, will
        trigger evaluating metric on eval data. If None, will not run eval step.
      eval_metric_fn: metric_fn for evaluation in test_step.
      summary_writer: a SummaryWriter instance used to write eval metrics.

    Returns:
      Eval metrics dictionary of the last checkpoint.
    """
    if not callable(eval_metric_fn):
      raise ValueError('if `eval_metric_fn` is specified, '
                       'eval_metric_fn must be a callable.')

    old_phase = tf.keras.backend.learning_phase()
    tf.keras.backend.set_learning_phase(0)
    params = self._params
    strategy = self._strategy
    # To reduce unnecessary send/receive input pipeline operation, we place
    # input pipeline ops in worker task.
    with strategy.scope():

      # To correctly place the model weights on accelerators,
      # model and optimizer should be created in scope.
      model = self.model_fn(params.as_dict())
      checkpoint = tf.train.Checkpoint(model=model)

      eval_metric = eval_metric_fn()
      assert eval_metric, 'eval_metric does not exist'
      test_step = self._create_test_step(strategy, model, metric=eval_metric)

      logging.info('Starting to evaluate.')
      if not checkpoint_path:
        raise ValueError('checkpoint path is empty')
      reader = tf.compat.v1.train.NewCheckpointReader(checkpoint_path)
      current_step = reader.get_tensor(
          'optimizer/iter/.ATTRIBUTES/VARIABLE_VALUE')
      logging.info('Checkpoint file %s found and restoring from '
                   'checkpoint', checkpoint_path)
      status = checkpoint.restore(checkpoint_path)
      status.expect_partial().assert_existing_objects_matched()

      self.global_train_step = model.optimizer.iterations
      eval_iterator = self._get_input_iterator(eval_input_fn, strategy)
      eval_metric_result = self._run_evaluation(test_step, current_step,
                                                eval_metric, eval_iterator)
      logging.info('Step: %s evaluation metric = %s.', current_step,
                   eval_metric_result)
      summary_writer(metrics=eval_metric_result, step=current_step)
      reset_states(eval_metric)

    tf.keras.backend.set_learning_phase(old_phase)
    return eval_metric_result, current_step

  def predict(self):
    raise NotImplementedError('Unimplemented function.')


class ExecutorBuilder(object):
  """Builder of DistributedExecutor.

  Example 1: Builds an executor with supported Strategy.
    builder = ExecutorBuilder(
        strategy_type='tpu',
        strategy_config={'tpu': '/bns/xxx'})
    dist_executor = builder.build_executor(
        params=params,
        model_fn=my_model_fn,
        loss_fn=my_loss_fn,
        metric_fn=my_metric_fn)

  Example 2: Builds an executor with customized Strategy.
    builder = ExecutorBuilder()
    builder.strategy = <some customized Strategy>
    dist_executor = builder.build_executor(
        params=params,
        model_fn=my_model_fn,
        loss_fn=my_loss_fn,
        metric_fn=my_metric_fn)

  Example 3: Builds a customized executor with customized Strategy.
    class MyDistributedExecutor(DistributedExecutor):
      # implementation ...

    builder = ExecutorBuilder()
    builder.strategy = <some customized Strategy>
    dist_executor = builder.build_executor(
        class_ctor=MyDistributedExecutor,
        params=params,
        model_fn=my_model_fn,
        loss_fn=my_loss_fn,
        metric_fn=my_metric_fn)
  """

  def __init__(self, strategy_type=None, strategy_config=None):
    """Constructor.

    Args:
      strategy_type: string. One of 'tpu', 'mirrored', 'multi_worker_mirrored'.
        If None, the user is responsible to set the strategy before calling
        build_executor(...).
      strategy_config: necessary config for constructing the proper Strategy.
        Check strategy_flags_dict() for examples of the structure.
    """
    _ = distribute_utils.configure_cluster(strategy_config.worker_hosts,
                                           strategy_config.task_index)
    self._strategy = distribute_utils.get_distribution_strategy(
        distribution_strategy=strategy_type,
        num_gpus=strategy_config.num_gpus,
        all_reduce_alg=strategy_config.all_reduce_alg,
        num_packs=strategy_config.num_packs,
        tpu_address=strategy_config.tpu)

  @property
  def strategy(self):
    """Returns default checkpoint name."""
    return self._strategy

  @strategy.setter
  def strategy(self, new_strategy):
    """Sets default summary writer for the current thread."""
    self._strategy = new_strategy

  def build_executor(self,
                     class_ctor=DistributedExecutor,
                     params=None,
                     model_fn=None,
                     loss_fn=None,
                     **kwargs):
    """Creates an executor according to strategy type.

    See the docstring of DistributedExecutor.__init__ for more information on
    the input arguments.

    Args:
      class_ctor: A constructor of executor (default: DistributedExecutor).
      params: ParamsDict, all the model parameters and runtime parameters.
      model_fn: Keras model function.
      loss_fn: loss function.
      **kwargs: other arguments to the executor constructor.

    Returns:
      An instance of DistributedExecutor or its subclass.
    """
    if self._strategy is None:
      raise ValueError('`strategy` should not be None. You need to specify '
                       '`strategy_type` in the builder constructor or directly '
                       'set the `strategy` property of the builder.')
    return class_ctor(
        strategy=self._strategy,
        params=params,
        model_fn=model_fn,
        loss_fn=loss_fn,
        **kwargs)