# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Lightweight utilities to train NLP models."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json
import os
import tempfile

from absl import logging
import tensorflow as tf
from tensorflow.python.util import deprecation
from official.staging.training import grad_utils
from official.utils.misc import distribution_utils

_SUMMARY_TXT = 'training_summary.txt'
_MIN_SUMMARY_STEPS = 10


def _should_export_checkpoint(strategy):
  return (not strategy) or strategy.extended.should_checkpoint


def _should_export_summary(strategy):
  return (not strategy) or strategy.extended.should_save_summary


def _save_checkpoint(strategy, checkpoint, model_dir, checkpoint_prefix):
  """Saves the model with the provided checkpoint prefix."""

  if _should_export_checkpoint(strategy):
    checkpoint_path = os.path.join(model_dir, checkpoint_prefix)
    saved_path = checkpoint.save(checkpoint_path)
    logging.info('Saving model as TF checkpoint: %s', saved_path)
  else:
    # In multi worker training we need every worker to save checkpoint, because
    # variables can trigger synchronization on read and synchronization needs
    # all workers to participate. To avoid workers overwriting each other, we
    # save to a temporary directory on non-chief workers.
    tmp_dir = tempfile.mkdtemp()
    checkpoint.save(os.path.join(tmp_dir, 'ckpt'))
    tf.io.gfile.rmtree(tmp_dir)
  return


def _get_input_iterator(input_fn, strategy):
  """Returns distributed dataset iterator."""
  # When training with TPU pods, datasets need to be cloned across
  # workers. Since a Dataset instance cannot be cloned in eager mode, we
  # instead pass a callable that returns a dataset.
  if not callable(input_fn):
    raise ValueError('`input_fn` should be a closure that returns a dataset.')
  iterator = iter(
      strategy.experimental_distribute_datasets_from_function(input_fn))
  return iterator
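
# A sketch of a compatible `input_fn` (illustrative; `features` and `labels`
# are hypothetical tensors). Note that
# `experimental_distribute_datasets_from_function` invokes the callable with a
# `tf.distribute.InputContext` argument:
#
#   def train_input_fn(ctx):
#     dataset = tf.data.Dataset.from_tensor_slices((features, labels))
#     dataset = dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
#     return dataset.repeat().batch(32, drop_remainder=True)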


def _float_metric_value(metric):
  """Gets the value of a float-value keras metric."""
  return metric.result().numpy().astype(float)


def steps_to_run(current_step, steps_per_epoch, steps_per_loop):
  """Calculates the number of steps to run on device."""
  if steps_per_loop <= 0:
    raise ValueError('steps_per_loop should be a positive integer.')
  if steps_per_loop == 1:
    return steps_per_loop
  remainder_in_epoch = current_step % steps_per_epoch
  if remainder_in_epoch != 0:
    return min(steps_per_epoch - remainder_in_epoch, steps_per_loop)
  else:
    return steps_per_loop
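
# Illustration (hypothetical values): with steps_per_epoch=100 and
# steps_per_loop=32, steps_to_run(90, 100, 32) returns 10 so the inner loop
# stops exactly at the epoch boundary, while steps_to_run(100, 100, 32)
# returns the full 32 because step 100 starts a new epoch.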


def write_txt_summary(training_summary, summary_dir):
  """Writes a summary text file to record stats."""
  if not tf.io.gfile.exists(summary_dir):
    tf.io.gfile.mkdir(summary_dir)
  summary_path = os.path.join(summary_dir, _SUMMARY_TXT)
  with tf.io.gfile.GFile(summary_path, 'wb') as f:
    logging.info('Training Summary: \n%s', str(training_summary))
    f.write(json.dumps(training_summary, indent=4))
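
# Example contents of training_summary.txt (illustrative values only):
#
#   {
#       "total_training_steps": 3000,
#       "train_loss": 1.23
#   }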


@deprecation.deprecated(
    None, 'This function is deprecated. Please use Keras compile/fit instead.')
def run_customized_training_loop(
    # pylint: disable=invalid-name
    _sentinel=None,
    # pylint: enable=invalid-name
    strategy=None,
    model_fn=None,
    loss_fn=None,
    scale_loss=True,
    model_dir=None,
    train_input_fn=None,
    steps_per_epoch=None,
    steps_per_loop=1,
    epochs=1,
    eval_input_fn=None,
    eval_steps=None,
    metric_fn=None,
    init_checkpoint=None,
    custom_callbacks=None,
    run_eagerly=False,
    sub_model_export_name=None,
    explicit_allreduce=False,
    pre_allreduce_callbacks=None,
    post_allreduce_callbacks=None,
    train_summary_interval=0):
  """Run BERT pretrain model training using low-level API.

  Arguments:
      _sentinel: Used to prevent positional parameters. Internal, do not use.
      strategy: Distribution strategy on which to run low level training loop.
      model_fn: Function that returns a tuple (model, sub_model). The caller of
        this function should add an optimizer to the `model` by calling the
        `model.compile()` API or manually setting the `model.optimizer`
        attribute. The second element of the returned tuple (sub_model) is an
        optional sub model to be used for loading the initial checkpoint, if
        provided.
      loss_fn: Function with signature func(labels, logits) that returns a loss
        tensor.
      scale_loss: Whether to divide the raw loss by the number of replicas
        before computing gradients.
      model_dir: Model directory used during training for restoring/saving model
        weights.
      train_input_fn: Function that returns a tf.data.Dataset used for training.
      steps_per_epoch: Number of steps to run per epoch. At the end of each
        epoch, a model checkpoint will be saved and evaluation will be
        conducted if an evaluation dataset is provided.
      steps_per_loop: Number of steps per graph-mode loop. In order to reduce
        communication in the eager context, training logs are printed every
        steps_per_loop steps.
      epochs: Number of epochs to train.
      eval_input_fn: Function that returns an evaluation dataset. If None,
        evaluation is skipped.
      eval_steps: Number of steps to run evaluation. Required if
        `eval_input_fn` is not None.
      metric_fn: A metrics function that returns a Keras Metric object to
        record evaluation results, using the evaluation dataset or the training
        dataset after every epoch.
      init_checkpoint: Optional checkpoint to load into the `sub_model`
        returned by `model_fn`.
      custom_callbacks: A list of Keras Callback objects to run during
        training. More specifically, the `on_batch_begin()`, `on_batch_end()`,
        `on_epoch_begin()` and `on_epoch_end()` methods are invoked during
        training. Note that some metrics may be missing from `logs`.
      run_eagerly: Whether to run model training in pure eager execution. This
        should be disabled for TPUStrategy.
      sub_model_export_name: If not None, will export the `sub_model` returned
        by `model_fn` into checkpoint files. The name of an intermediate
        checkpoint file is {sub_model_export_name}_step_{step}.ckpt, and the
        last checkpoint's name is {sub_model_export_name}.ckpt; if None,
        `sub_model` will not be exported as a checkpoint.
      explicit_allreduce: Whether to explicitly perform gradient allreduce
        instead of relying on the implicit allreduce in
        optimizer.apply_gradients(). Default is False. For now, if training
        with FP16 mixed precision, explicit allreduce will aggregate gradients
        in FP16 format; for TPU and GPU training with FP32, explicit allreduce
        will aggregate gradients in FP32 format.
      pre_allreduce_callbacks: A list of callback functions that take gradient
        and model variable pairs as input, manipulate them, and return new
        gradient and model variable pairs. The callback functions will be
        invoked in list order, before gradients are allreduced.
        With mixed precision training, the pre_allreduce_callbacks will be
        applied to the scaled gradients. Default is no callbacks.
        Only used when explicit_allreduce=True.
      post_allreduce_callbacks: A list of callback functions that take gradient
        and model variable pairs as input, manipulate them, and return new
        gradient and model variable pairs. The callback functions will be
        invoked in list order, right before gradients are applied to variables
        for updates. Default is no callbacks. Only used when
        explicit_allreduce=True.
      train_summary_interval: Step interval for training summaries. If the value
        is a negative number, then training summaries are not enabled.

  Returns:
      Trained model.

  Raises:
      ValueError: (1) When the model returned by `model_fn` does not have an
        optimizer attribute, or when required parameters are set to None. (2)
        When eval args are not specified correctly. (3) When `metric_fn` is
        specified but is not callable. (4) When `sub_model_export_name` is
        specified but the `sub_model` returned by `model_fn` is None.
  """

  if _sentinel is not None:
    raise ValueError('only call `run_customized_training_loop()` '
                     'with named arguments.')

  required_arguments = [
      strategy, model_fn, loss_fn, model_dir, steps_per_epoch, train_input_fn
  ]
  if [arg for arg in required_arguments if arg is None]:
    raise ValueError('`strategy`, `model_fn`, `loss_fn`, `model_dir`, '
                     '`steps_per_epoch` and `train_input_fn` are required '
                     'parameters.')
  if steps_per_loop > steps_per_epoch:
    logging.error(
        'steps_per_loop: %d is specified to be greater than '
        'steps_per_epoch: %d; we will use steps_per_epoch as '
        'steps_per_loop.', steps_per_loop, steps_per_epoch)
    steps_per_loop = steps_per_epoch
  assert tf.executing_eagerly()

  if run_eagerly:
    if isinstance(strategy, tf.distribute.experimental.TPUStrategy):
      raise ValueError(
          'TPUStrategy should not run eagerly as it heavily relies on graph'
          ' optimization for the distributed system.')

  if eval_input_fn and (eval_steps is None or metric_fn is None):
    raise ValueError(
        '`eval_steps` and `metric_fn` are required when `eval_input_fn` '
        'is not None.')
  if metric_fn and not callable(metric_fn):
    raise ValueError(
        'If `metric_fn` is specified, it must be a callable.')

  callback_list = tf.keras.callbacks.CallbackList(custom_callbacks)
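  # A sketch of a compatible custom callback (hypothetical); this loop passes
  # {'loss': ...} as `logs` to `on_batch_end`:
  #
  #   class LossLogger(tf.keras.callbacks.Callback):
  #
  #     def on_batch_end(self, batch, logs=None):
  #       logging.info('batch %d loss %s', batch, (logs or {}).get('loss'))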

  total_training_steps = steps_per_epoch * epochs
  train_iterator = _get_input_iterator(train_input_fn, strategy)

  with distribution_utils.get_strategy_scope(strategy):
    # To correctly place the model weights on accelerators,
    # model and optimizer should be created in scope.
    model, sub_model = model_fn()
    if not hasattr(model, 'optimizer'):
      raise ValueError('User should set the optimizer attribute on the model '
                       'inside `model_fn`.')
    if sub_model_export_name and sub_model is None:
      raise ValueError('sub_model_export_name is specified as %s, but '
                       'sub_model is None.' % sub_model_export_name)

    optimizer = model.optimizer

    if init_checkpoint:
      logging.info(
          'Checkpoint file %s found and restoring from '
          'initial checkpoint for core model.', init_checkpoint)
      checkpoint = tf.train.Checkpoint(model=sub_model)
      checkpoint.restore(init_checkpoint).assert_existing_objects_matched()
      logging.info('Loading from checkpoint file completed')

    train_loss_metric = tf.keras.metrics.Mean(
        'training_loss', dtype=tf.float32)
    eval_metrics = [metric_fn()] if metric_fn else []
    # If evaluation is required, make a copy of the metric, as it will be used
    # by both training and evaluation.
    train_metrics = [
        metric.__class__.from_config(metric.get_config())
        for metric in eval_metrics
    ]

    # Create summary writers
    if _should_export_summary(strategy):
      summary_dir = os.path.join(model_dir, 'summaries')
    else:
      # In multi worker training we need every worker to write summary, because
      # variables can trigger synchronization on read and synchronization needs
      # all workers to participate.
      summary_dir = tempfile.mkdtemp()
    eval_summary_writer = tf.summary.create_file_writer(
        os.path.join(summary_dir, 'eval'))
    last_summary_step = 0
    if steps_per_loop >= _MIN_SUMMARY_STEPS and train_summary_interval >= 0:
      # Only write summaries when stats have been collected over sufficiently
      # many steps.
      train_summary_writer = tf.summary.create_file_writer(
          os.path.join(summary_dir, 'train'))
    else:
      train_summary_writer = tf.summary.create_noop_writer()

    # Collects training variables.
    training_vars = model.trainable_variables

    def _replicated_step(inputs):
      """Replicated training step."""

      inputs, labels = inputs
      with tf.GradientTape() as tape:
        model_outputs = model(inputs, training=True)
        loss = loss_fn(labels, model_outputs)
        # Raw loss is used for reporting in metrics/logs.
        raw_loss = loss
        if scale_loss:
          # Scales down the loss so gradients are invariant to the number of
          # replicas.
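          # Illustrative note: with e.g. 8 replicas in sync, each replica
          # contributes loss/8, so the gradients summed across replicas match
          # those of the global batch.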
          loss = loss / strategy.num_replicas_in_sync

      if explicit_allreduce:
        grad_utils.minimize_using_explicit_allreduce(tape, optimizer, loss,
                                                     training_vars,
                                                     pre_allreduce_callbacks,
                                                     post_allreduce_callbacks)
      else:
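        # With a LossScaleOptimizer (mixed precision), gradients are computed
        # on the scaled loss and then unscaled before being applied, so the
        # update matches what unscaled training would produce (up to
        # floating-point effects).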
        if isinstance(optimizer,
                      tf.keras.mixed_precision.experimental.LossScaleOptimizer):
          with tape:
            scaled_loss = optimizer.get_scaled_loss(loss)
          scaled_grads = tape.gradient(scaled_loss, training_vars)
          grads = optimizer.get_unscaled_gradients(scaled_grads)
        else:
          grads = tape.gradient(loss, training_vars)
        optimizer.apply_gradients(zip(grads, training_vars))
      # For reporting, the metric takes the mean of losses.
      train_loss_metric.update_state(raw_loss)
      for metric in train_metrics:
        metric.update_state(labels, model_outputs)

    @tf.function
    def train_steps(iterator, steps):
      """Performs distributed training steps in a loop.

      Args:
        iterator: the distributed iterator of training datasets.
        steps: a tf.int32 integer tensor specifying the number of steps to run
          inside the host training loop.

      Raises:
        ValueError: Any of the arguments or tensor shapes are invalid.
      """
      if not isinstance(steps, tf.Tensor):
        raise ValueError('steps should be a Tensor. Python objects may cause '
                         'retracing.')
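
      # Illustration: calling train_steps(iterator, 100) with a Python int
      # would retrace the tf.function for every distinct value; the caller
      # below wraps the count in tf.convert_to_tensor(steps, dtype=tf.int32)
      # so a single traced graph is reused.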

      for _ in tf.range(steps):
        strategy.run(_replicated_step, args=(next(iterator),))

    def train_single_step(iterator):
      """Performs a distributed training step.

      Args:
        iterator: the distributed iterator of training datasets.

      Raises:
        ValueError: Any of the arguments or tensor shapes are invalid.
      """
      strategy.run(_replicated_step, args=(next(iterator),))

    def test_step(iterator):
      """Calculates evaluation metrics on distributed devices."""

      def _test_step_fn(inputs):
        """Replicated accuracy calculation."""

        inputs, labels = inputs
        model_outputs = model(inputs, training=False)
        for metric in eval_metrics:
          metric.update_state(labels, model_outputs)

      strategy.run(_test_step_fn, args=(next(iterator),))

    if not run_eagerly:
      train_single_step = tf.function(train_single_step)
      test_step = tf.function(test_step)

    def _run_evaluation(current_training_step, test_iterator):
      """Runs validation steps and aggregate metrics.

      Args:
        current_training_step: tf.int32 tensor containing the current step.
        test_iterator: distributed iterator of test datasets.

      Returns:
        A dict of metric names and values.
      """
      for _ in range(eval_steps):
        test_step(test_iterator)

      logs = {}
      with eval_summary_writer.as_default():
        for metric in eval_metrics + model.metrics:
          metric_value = _float_metric_value(metric)
          logs[metric.name] = metric_value
          logging.info('Step: [%d] Validation %s = %f', current_training_step,
                       metric.name, metric_value)
          tf.summary.scalar(
              metric.name, metric_value, step=current_training_step)
        eval_summary_writer.flush()

      return logs

    # Training loop starts here.
    checkpoint = tf.train.Checkpoint(
        model=model, optimizer=optimizer, global_step=optimizer.iterations)
    sub_model_checkpoint = tf.train.Checkpoint(
        model=sub_model,
        global_step=optimizer.iterations) if sub_model_export_name else None

    latest_checkpoint_file = tf.train.latest_checkpoint(model_dir)
    if latest_checkpoint_file:
      logging.info(
          'Checkpoint file %s found and restoring from '
          'checkpoint', latest_checkpoint_file)
      checkpoint.restore(latest_checkpoint_file)
      logging.info('Loading from checkpoint file completed')

    current_step = optimizer.iterations.numpy()
    checkpoint_name = 'ctl_step_{step}.ckpt'
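    # e.g. checkpoint_name.format(step=1000) -> 'ctl_step_1000.ckpt'.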

    while current_step < total_training_steps:
      if current_step % steps_per_epoch == 0:
        callback_list.on_epoch_begin(int(current_step / steps_per_epoch) + 1)

      # Training loss/metrics are averaged over the steps of the inner
      # training loop. We reset their values before each round.
      train_loss_metric.reset_states()
      for metric in train_metrics + model.metrics:
        metric.reset_states()

      callback_list.on_batch_begin(current_step)
      # Runs several steps in the host while loop.
      steps = steps_to_run(current_step, steps_per_epoch, steps_per_loop)

      if tf.config.list_physical_devices('GPU'):
        # TODO(zongweiz): merge with train_steps once tf.while_loop
        # GPU performance bugs are fixed.
        for _ in range(steps):
          train_single_step(train_iterator)
      else:
        # Converts steps to a Tensor to avoid tf.function retracing.
        train_steps(train_iterator,
                    tf.convert_to_tensor(steps, dtype=tf.int32))
      train_loss = _float_metric_value(train_loss_metric)
      current_step += steps
      callback_list.on_batch_end(current_step - 1, {'loss': train_loss})

      # Updates training logging.
      training_status = 'Train Step: %d/%d  / loss = %s' % (
          current_step, total_training_steps, train_loss)

      if current_step >= last_summary_step + train_summary_interval:
        summary_writer = train_summary_writer
        last_summary_step = current_step
      else:
        summary_writer = tf.summary.create_noop_writer()

      with summary_writer.as_default():
        tf.summary.scalar(
            train_loss_metric.name, train_loss, step=current_step)
        for metric in train_metrics + model.metrics:
          metric_value = _float_metric_value(metric)
          training_status += '  %s = %f' % (metric.name, metric_value)
          tf.summary.scalar(metric.name, metric_value, step=current_step)
        summary_writer.flush()
      logging.info(training_status)

      if current_step % steps_per_epoch == 0:
        # Save a submodel with the step in the file name after each epoch.
        if sub_model_export_name:
          _save_checkpoint(
              strategy, sub_model_checkpoint, model_dir,
              '%s_step_%d.ckpt' % (sub_model_export_name, current_step))

        # Save model checkpoints and run validation steps after each epoch
        # (with the exception of the final epoch which is handled after the
        # training loop).
        if current_step < total_training_steps:
          _save_checkpoint(strategy, checkpoint, model_dir,
                           checkpoint_name.format(step=current_step))
          logs = None
          if eval_input_fn:
            logging.info('Running evaluation after step: %s.', current_step)
            logs = _run_evaluation(current_step,
                                   _get_input_iterator(eval_input_fn, strategy))
            # Re-initialize evaluation metrics.
            for metric in eval_metrics + model.metrics:
              metric.reset_states()

          callback_list.on_epoch_end(int(current_step / steps_per_epoch), logs)

    if sub_model_export_name:
      _save_checkpoint(strategy, sub_model_checkpoint, model_dir,
                       '%s.ckpt' % sub_model_export_name)

    _save_checkpoint(strategy, checkpoint, model_dir,
                     checkpoint_name.format(step=current_step))
    logs = None
    if eval_input_fn:
      logging.info('Running final evaluation after training is complete.')
      logs = _run_evaluation(current_step,
                             _get_input_iterator(eval_input_fn, strategy))

    callback_list.on_epoch_end(int(current_step / steps_per_epoch), logs)

    training_summary = {
        'total_training_steps': total_training_steps,
        'train_loss': _float_metric_value(train_loss_metric),
    }
    for metric in model.metrics:
      training_summary[metric.name] = _float_metric_value(metric)
    if eval_metrics:
      # TODO(hongkuny): Cleans up summary reporting in text.
      training_summary['last_train_metrics'] = _float_metric_value(
          train_metrics[0])
      training_summary['eval_metrics'] = _float_metric_value(eval_metrics[0])

    write_txt_summary(training_summary, summary_dir)

    if not _should_export_summary(strategy):
      tf.io.gfile.rmtree(summary_dir)

    return model