# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to train BERT models."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json
import os

from absl import logging
import tensorflow as tf

from tensorflow.python.util import object_identity
from official.utils.misc import distribution_utils

_SUMMARY_TXT = 'training_summary.txt'
_MIN_SUMMARY_STEPS = 10


def get_primary_cpu_task(use_remote_tpu=False):
  """Returns primary CPU task to which input pipeline Ops are put."""

  # Remote Eager Borg job configures the TPU worker with job name 'worker'.
  return '/job:worker' if use_remote_tpu else ''
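
# For illustration: get_primary_cpu_task(use_remote_tpu=True) returns
# '/job:worker', so ops created under tf.device(...) are pinned to the TPU
# worker host, while the default returns '' and leaves placement unchanged.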


def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):
  """Saves model to with provided checkpoint prefix."""

  checkpoint_path = os.path.join(model_dir, checkpoint_prefix)
  saved_path = checkpoint.save(checkpoint_path)
  logging.info('Saving model as TF checkpoint: %s', saved_path)
  return


def _get_input_iterator(input_fn, strategy):
  """Returns distributed dataset iterator."""

  # When training with TPU pods, datasets need to be cloned across
  # workers. Since a Dataset instance cannot be cloned in eager mode, we
  # instead pass a callable that returns a dataset.
  input_data = input_fn()
  if callable(input_data):
    iterator = iter(
        strategy.experimental_distribute_datasets_from_function(input_data))
  else:
    iterator = iter(strategy.experimental_distribute_dataset(input_data))
  return iterator
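

# Illustrative sketch of the two `input_fn` shapes accepted above (the file
# name and batch size are hypothetical):
#
#   def input_fn():                  # single-host case: returns a Dataset
#     return tf.data.TFRecordDataset('train.tfrecord').batch(32)
#
#   def input_fn():                  # TPU-pod case: returns a dataset factory
#     def dataset_fn(ctx):           # ctx is a tf.distribute.InputContext
#       per_replica = ctx.get_per_replica_batch_size(32)
#       return tf.data.TFRecordDataset('train.tfrecord').batch(per_replica)
#     return dataset_fn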


def _float_metric_value(metric):
  """Gets the value of a float-value keras metric."""
  return metric.result().numpy().astype(float)


def _steps_to_run(current_step, steps_per_epoch, steps_per_loop):
  """Calculates steps to run on device."""
  if steps_per_loop <= 0:
    raise ValueError('steps_per_loop should be a positive integer.')
  if steps_per_loop == 1:
    return steps_per_loop
  remainder_in_epoch = current_step % steps_per_epoch
  if remainder_in_epoch != 0:
    return min(steps_per_epoch - remainder_in_epoch, steps_per_loop)
  else:
    return steps_per_loop
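

# Worked examples of the clipping above (values are illustrative):
#   _steps_to_run(current_step=0, steps_per_epoch=100, steps_per_loop=40) -> 40
#   _steps_to_run(current_step=80, steps_per_epoch=100, steps_per_loop=40) -> 20
# i.e. a loop never crosses an epoch boundary, so checkpointing and evaluation
# happen exactly at the end of each epoch.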


def _write_txt_summary(training_summary, model_dir):
  """Writes a summary text file to record stats."""
  summary_path = os.path.join(model_dir, _SUMMARY_TXT)
  with tf.io.gfile.GFile(summary_path, 'wb') as f:
    logging.info('Training Summary: \n%s', str(training_summary))
    f.write(json.dumps(training_summary, indent=4))
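

# The resulting summary file is plain JSON, e.g. (values are illustrative):
#   {
#       "total_training_steps": 300,
#       "train_loss": 0.42
#   }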


def run_customized_training_loop(
    # pylint: disable=invalid-name
    _sentinel=None,
    # pylint: enable=invalid-name
    strategy=None,
    model_fn=None,
    loss_fn=None,
    model_dir=None,
    train_input_fn=None,
    steps_per_epoch=None,
    steps_per_loop=1,
    epochs=1,
    eval_input_fn=None,
    eval_steps=None,
    metric_fn=None,
    init_checkpoint=None,
    use_remote_tpu=False,
    custom_callbacks=None,
    run_eagerly=False):
  """Run BERT pretrain model training using low-level API.

  Arguments:
      _sentinel: Used to prevent positional parameters. Internal, do not use.
      strategy: Distribution strategy on which to run low level training loop.
      model_fn: Function that returns a tuple (model, sub_model). Caller of this
        function should add optimizer to the `model` via calling
        `model.compile()` API or manually setting `model.optimizer` attribute.
        The second element of the returned tuple (sub_model) is an optional
        sub model to be used for the initial checkpoint -- if provided.
      loss_fn: Function with signature func(labels, logits) that returns a loss
        tensor.
      model_dir: Model directory used during training for restoring/saving model
        weights.
      train_input_fn: Function that returns a tf.data.Dataset used for training.
      steps_per_epoch: Number of steps to run per epoch. At the end of each
        epoch, a model checkpoint will be saved and evaluation will be
        conducted if an evaluation dataset is provided.
      steps_per_loop: Number of steps per graph-mode loop. To reduce
        communication in an eager context, training logs are printed every
        `steps_per_loop` steps.
      epochs: Number of epochs to train.
      eval_input_fn: Function that returns an evaluation dataset. If None,
        evaluation is skipped.
      eval_steps: Number of steps to run evaluation. Required if
        `eval_input_fn` is not None.
      metric_fn: A metrics function that returns a Keras Metric object, used
        to record evaluation results on the evaluation dataset or on the
        training dataset after every epoch.
      init_checkpoint: Optional checkpoint to load to `sub_model` returned by
        `model_fn`.
      use_remote_tpu: If true, input pipeline ops are placed in TPU worker host
        as an optimization.
      custom_callbacks: A list of Keras Callback objects to run during
        training. More specifically, the `on_batch_begin()` and
        `on_batch_end()` methods are invoked during training.
      run_eagerly: Whether to run model training in pure eager execution. This
        should be disabled for TPUStrategy.

  Returns:
      Trained model.

  Raises:
      ValueError: (1) When model returned by `model_fn` does not have optimizer
        attribute or when required parameters are set to none. (2) eval args are
        not specified correctly. (3) metric_fn must be a callable if specified.
  """

  if _sentinel is not None:
    raise ValueError('only call `run_customized_training_loop()` '
                     'with named arguments.')

  required_arguments = [
      strategy, model_fn, loss_fn, model_dir, steps_per_epoch, train_input_fn
  ]
  if [arg for arg in required_arguments if arg is None]:
    raise ValueError('`strategy`, `model_fn`, `loss_fn`, `model_dir`, '
                     '`steps_per_epoch` and `train_input_fn` are required '
                     'parameters.')
  if steps_per_loop > steps_per_epoch:
    logging.error(
        'steps_per_loop: %d is specified to be greater than '
        'steps_per_epoch: %d; using steps_per_epoch as '
        'steps_per_loop.', steps_per_loop, steps_per_epoch)
    steps_per_loop = steps_per_epoch
  assert tf.executing_eagerly()

  if run_eagerly:
    if steps_per_loop > 1:
      raise ValueError(
          'steps_per_loop is used for performance optimization. When you want '
          'to run eagerly, you cannot leverage graph mode loop.')
    if isinstance(strategy, tf.distribute.experimental.TPUStrategy):
      raise ValueError(
          'TPUStrategy should not run eagerly as it heavily relies on graph'
          ' optimization for the distributed system.')

  if eval_input_fn and (eval_steps is None or metric_fn is None):
    raise ValueError(
        '`eval_steps` and `metric_fn` are required when `eval_input_fn` '
        'is not None.')
  if metric_fn and not callable(metric_fn):
    raise ValueError(
        'If `metric_fn` is specified, metric_fn must be a callable.')

  total_training_steps = steps_per_epoch * epochs

  # To reduce unnecessary send/receive input pipeline operations, we place
  # input pipeline ops on the worker task.
  with tf.device(get_primary_cpu_task(use_remote_tpu)):
    train_iterator = _get_input_iterator(train_input_fn, strategy)

    with distribution_utils.get_strategy_scope(strategy):
      # To correctly place the model weights on accelerators,
      # model and optimizer should be created in scope.
      model, sub_model = model_fn()
      if not hasattr(model, 'optimizer'):
        raise ValueError('User should set optimizer attribute to model '
                         'inside `model_fn`.')
      optimizer = model.optimizer
      use_float16 = isinstance(
          optimizer, tf.keras.mixed_precision.experimental.LossScaleOptimizer)
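      # Illustrative: mixed-precision callers are expected to wrap their
      # optimizer before passing it in, e.g.
      #   optimizer = tf.keras.mixed_precision.experimental.LossScaleOptimizer(
      #       optimizer, loss_scale='dynamic')
      # which is what the isinstance check above detects.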

      if init_checkpoint:
        logging.info(
            'Checkpoint file %s found and restoring from '
            'initial checkpoint for core model.', init_checkpoint)
        checkpoint = tf.train.Checkpoint(model=sub_model)
        checkpoint.restore(init_checkpoint).assert_consumed()
        logging.info('Loading from checkpoint file completed')

      train_loss_metric = tf.keras.metrics.Mean(
          'training_loss', dtype=tf.float32)
      eval_metrics = [metric_fn()] if metric_fn else []
      # If evaluation is required, make a copy of the metric as it will be
      # used by both train and evaluation.
      train_metrics = [
          metric.__class__.from_config(metric.get_config())
          for metric in eval_metrics
      ]

      # Create summary writers
      eval_summary_writer = tf.summary.create_file_writer(
          os.path.join(model_dir, 'summaries/eval'))
      if steps_per_loop >= _MIN_SUMMARY_STEPS:
        # Only writes summary when the stats are collected sufficiently over
        # enough steps.
        train_summary_writer = tf.summary.create_file_writer(
            os.path.join(model_dir, 'summaries/train'))
      else:
        train_summary_writer = None

      def _replicated_step(inputs):
        """Replicated training step."""

        inputs, labels = inputs
        with tf.GradientTape() as tape:
          model_outputs = model(inputs)
          loss = loss_fn(labels, model_outputs)
          if use_float16:
            scaled_loss = optimizer.get_scaled_loss(loss)

        # De-dupes variables due to keras tracking issues.
        tvars = list(
            object_identity.ObjectIdentitySet(model.trainable_variables))
        if use_float16:
          scaled_grads = tape.gradient(scaled_loss, tvars)
          grads = optimizer.get_unscaled_gradients(scaled_grads)
        else:
          grads = tape.gradient(loss, tvars)
        optimizer.apply_gradients(zip(grads, tvars))
        # For reporting, the metric takes the mean of losses.
        train_loss_metric.update_state(loss)
        for metric in train_metrics:
          metric.update_state(labels, model_outputs)

      @tf.function
      def train_steps(iterator, steps):
        """Performs distributed training steps in a loop.

        Args:
          iterator: the distributed iterator of training datasets.
          steps: a tf.int32 scalar tensor specifying the number of steps to
            run inside the host training loop.

        Raises:
          ValueError: Any of the arguments or tensor shapes are invalid.
        """
        if not isinstance(steps, tf.Tensor):
          raise ValueError('steps should be a Tensor. A Python object may '
                           'cause retracing.')
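        # Illustrative: with Python ints, calling
        #   train_steps(iterator, 40); train_steps(iterator, 20)
        # would trace two separate graphs, while a tf.int32 tensor such as
        #   tf.constant(40, dtype=tf.int32)
        # lets a single trace be reused for any step count.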

        for _ in tf.range(steps):
          strategy.experimental_run_v2(_replicated_step, args=(next(iterator),))

      def train_single_step(iterator):
        """Performs a distributed training step.

        Args:
          iterator: the distributed iterator of training datasets.

        Raises:
          ValueError: Any of the arguments or tensor shapes are invalid.
        """
        strategy.experimental_run_v2(_replicated_step, args=(next(iterator),))

      def test_step(iterator):
        """Calculates evaluation metrics on distributed devices."""

        def _test_step_fn(inputs):
          """Replicated accuracy calculation."""

          inputs, labels = inputs
          model_outputs = model(inputs, training=False)
          for metric in eval_metrics:
            metric.update_state(labels, model_outputs)

        strategy.experimental_run_v2(_test_step_fn, args=(next(iterator),))

      if not run_eagerly:
        train_single_step = tf.function(train_single_step)
        test_step = tf.function(test_step)

      def _run_evaluation(current_training_step, test_iterator):
        """Runs validation steps and aggregates metrics."""
        for _ in range(eval_steps):
          test_step(test_iterator)

        with eval_summary_writer.as_default():
          for metric in eval_metrics + model.metrics:
            metric_value = _float_metric_value(metric)
            logging.info('Step: [%d] Validation %s = %f', current_training_step,
                         metric.name, metric_value)
            tf.summary.scalar(
                metric.name, metric_value, step=current_training_step)
          eval_summary_writer.flush()

      def _run_callbacks_on_batch_begin(batch):
        """Runs custom callbacks at the start of every step."""
        if not custom_callbacks:
          return
        for callback in custom_callbacks:
          callback.on_batch_begin(batch)

      def _run_callbacks_on_batch_end(batch):
        """Runs custom callbacks at the end of every step."""
        if not custom_callbacks:
          return
        for callback in custom_callbacks:
          callback.on_batch_end(batch)
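      # Illustrative: a `custom_callbacks` entry could be e.g.
      #   tf.keras.callbacks.LambdaCallback(
      #       on_batch_end=lambda batch, logs=None: logging.info(
      #           'finished step %d', batch))
      # since only on_batch_begin()/on_batch_end() are invoked by this loop.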

      # Training loop starts here.
      checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
      latest_checkpoint_file = tf.train.latest_checkpoint(model_dir)
      if latest_checkpoint_file:
        logging.info(
            'Checkpoint file %s found and restoring from '
            'checkpoint', latest_checkpoint_file)
        checkpoint.restore(latest_checkpoint_file)
        logging.info('Loading from checkpoint file completed')

      current_step = optimizer.iterations.numpy()
      checkpoint_name = 'ctl_step_{step}.ckpt'
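      # e.g. 'ctl_step_300.ckpt' when saving after training step 300.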

      while current_step < total_training_steps:
        # Training loss/metrics are averaged over the steps inside the micro
        # training loop. We reset their values before each round.
        train_loss_metric.reset_states()
        for metric in train_metrics + model.metrics:
          metric.reset_states()

        _run_callbacks_on_batch_begin(current_step)
        # Runs several steps in the host while loop.
        steps = _steps_to_run(current_step, steps_per_epoch, steps_per_loop)

        if steps == 1:
          # TODO(zongweiz): merge with train_steps once tf.while_loop
          # GPU performance bugs are fixed.
          train_single_step(train_iterator)
        else:
          # Converts steps to a Tensor to avoid tf.function retracing.
          train_steps(train_iterator,
                      tf.convert_to_tensor(steps, dtype=tf.int32))
        _run_callbacks_on_batch_end(current_step)
        current_step += steps

        train_loss = _float_metric_value(train_loss_metric)
        # Updates training logging.
        training_status = 'Train Step: %d/%d  / loss = %s' % (
            current_step, total_training_steps, train_loss)

        if train_summary_writer:
          with train_summary_writer.as_default():
            tf.summary.scalar(
                train_loss_metric.name, train_loss, step=current_step)
            for metric in train_metrics + model.metrics:
              metric_value = _float_metric_value(metric)
              training_status += '  %s = %f' % (metric.name, metric_value)
              tf.summary.scalar(metric.name, metric_value, step=current_step)
            train_summary_writer.flush()
        logging.info(training_status)

        # Saves model checkpoints and runs validation steps at every epoch end.
        if current_step % steps_per_epoch == 0:
          # To avoid repeated model saving, we do not save after the last
          # step of training.
          if current_step < total_training_steps:
            _save_checkpoint(checkpoint, model_dir,
                             checkpoint_name.format(step=current_step))

          if eval_input_fn:
            logging.info('Running evaluation after step: %s.', current_step)
            _run_evaluation(current_step,
                            _get_input_iterator(eval_input_fn, strategy))
            # Re-initialize evaluation metric.
            for metric in eval_metrics + model.metrics:
              metric.reset_states()

      _save_checkpoint(checkpoint, model_dir,
                       checkpoint_name.format(step=current_step))

      if eval_input_fn:
        logging.info('Running final evaluation after training is complete.')
        _run_evaluation(current_step,
                        _get_input_iterator(eval_input_fn, strategy))

      training_summary = {
          'total_training_steps': total_training_steps,
          'train_loss': _float_metric_value(train_loss_metric),
      }
      if eval_metrics:
        # TODO(hongkuny): Cleans up summary reporting in text.
        training_summary['last_train_metrics'] = _float_metric_value(
            train_metrics[0])
        training_summary['eval_metrics'] = _float_metric_value(eval_metrics[0])

      _write_txt_summary(training_summary, model_dir)

      return model
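

# Example usage (illustrative sketch; `create_classifier_model`, `input_fn`
# and all hyperparameters below are hypothetical stand-ins for caller code):
#
#   def model_fn():
#     model, core_model = create_classifier_model()
#     model.optimizer = tf.keras.optimizers.Adam(learning_rate=2e-5)
#     return model, core_model
#
#   trained_model = run_customized_training_loop(
#       strategy=tf.distribute.MirroredStrategy(),
#       model_fn=model_fn,
#       loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
#       model_dir='/tmp/model_dir',
#       train_input_fn=input_fn,
#       steps_per_epoch=100,
#       steps_per_loop=10,
#       epochs=3,
#       eval_input_fn=input_fn,
#       eval_steps=20,
#       metric_fn=lambda: tf.keras.metrics.SparseCategoricalAccuracy(
#           'accuracy', dtype=tf.float32))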