# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to train BERT models."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json
import os

from absl import logging
import tensorflow as tf
from tensorflow.python.util import object_identity
from official.utils.misc import distribution_utils
from official.utils.misc import tpu_lib

_SUMMARY_TXT = 'training_summary.txt'
_MIN_SUMMARY_STEPS = 10


def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):
  """Saves model to with provided checkpoint prefix."""

  checkpoint_path = os.path.join(model_dir, checkpoint_prefix)
  saved_path = checkpoint.save(checkpoint_path)
  logging.info('Saving model as TF checkpoint: %s', saved_path)
  return


def _get_input_iterator(input_fn, strategy):
  """Returns distributed dataset iterator."""

  # When training with TPU pods, datasets need to be cloned across
  # workers. Since a Dataset instance cannot be cloned in eager mode, we
  # instead pass a callable that returns a dataset.
  input_data = input_fn()
  if callable(input_data):
    iterator = iter(
        strategy.experimental_distribute_datasets_from_function(input_data))
  else:
    iterator = iter(strategy.experimental_distribute_dataset(input_data))
  return iterator
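
# A caller-side sketch (hypothetical, not part of this module): for the TPU-pod
# path above, `input_fn` itself returns a callable that receives a
# tf.distribute.InputContext and builds a per-worker dataset, e.g.:
#
#   def input_fn():
#     def dataset_fn(ctx):
#       dataset = tf.data.Dataset.range(1024).batch(32)
#       return dataset.shard(ctx.num_input_pipelines, ctx.input_pipeline_id)
#     return dataset_fn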


def _float_metric_value(metric):
  """Gets the value of a float-value keras metric."""
  return metric.result().numpy().astype(float)


def _steps_to_run(current_step, steps_per_epoch, steps_per_loop):
  """Calculates steps to run on device."""
  if steps_per_loop <= 0:
    raise ValueError('steps_per_loop should be a positive integer.')
  if steps_per_loop == 1:
    return steps_per_loop
  remainder_in_epoch = current_step % steps_per_epoch
  if remainder_in_epoch != 0:
    return min(steps_per_epoch - remainder_in_epoch, steps_per_loop)
  else:
    return steps_per_loop
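
# A worked example with hypothetical numbers: _steps_to_run(current_step=90,
# steps_per_epoch=100, steps_per_loop=32) returns min(100 - 90, 32) = 10, so
# the host loop pauses exactly at the epoch boundary, where checkpointing and
# evaluation take place.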


def _write_txt_summary(training_summary, model_dir):
  """Writes a summary text file to record stats."""
  summary_path = os.path.join(model_dir, _SUMMARY_TXT)
  with tf.io.gfile.GFile(summary_path, 'w') as f:
    logging.info('Training Summary: \n%s', str(training_summary))
    f.write(json.dumps(training_summary, indent=4))
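
# The summary written above is a small JSON object; an illustrative (not
# actual) example of its contents:
#
#   {
#       "total_training_steps": 3000,
#       "train_loss": 0.12,
#       "last_train_metrics": 0.85,
#       "eval_metrics": 0.83
#   }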


def run_customized_training_loop(
    # pylint: disable=invalid-name
    _sentinel=None,
    # pylint: enable=invalid-name
    strategy=None,
    model_fn=None,
    loss_fn=None,
    model_dir=None,
    train_input_fn=None,
    steps_per_epoch=None,
    steps_per_loop=1,
    epochs=1,
    eval_input_fn=None,
    eval_steps=None,
    metric_fn=None,
    init_checkpoint=None,
    use_remote_tpu=False,
    custom_callbacks=None,
    run_eagerly=False):
  """Run BERT pretrain model training using low-level API.

  Arguments:
      _sentinel: Used to prevent positional parameters. Internal, do not use.
      strategy: Distribution strategy on which to run low level training loop.
      model_fn: Function that returns a tuple (model, sub_model). Caller of this
        function should add optimizer to the `model` via calling
        `model.compile()` API or manually setting `model.optimizer` attribute.
        Second element of the returned tuple(sub_model) is an optional sub model
        to be used for initial checkpoint -- if provided.
      loss_fn: Function with signature func(labels, logits) and returns a loss
        tensor.
      model_dir: Model directory used during training for restoring/saving model
        weights.
      train_input_fn: Function that returns a tf.data.Dataset used for training.
118
119
120
121
122
123
      steps_per_epoch: Number of steps to run per epoch. At the end of each
        epoch, model checkpoint will be saved and evaluation will be conducted
        if evaluation dataset is provided.
      steps_per_loop: Number of steps per graph-mode loop. In order to reduce
        communication in eager context, training logs are printed every
        steps_per_loop.
124
125
126
127
128
129
130
131
132
133
134
135
      epochs: Number of epochs to train.
      eval_input_fn: Function that returns evaluation dataset. If none,
        evaluation is skipped.
      eval_steps: Number of steps to run evaluation. Required if `eval_input_fn`
        is not none.
      metric_fn: A metrics function that returns a Keras Metric object to record
        evaluation result using evaluation dataset or with training dataset
        after every epoch.
      init_checkpoint: Optional checkpoint to load to `sub_model` returned by
        `model_fn`.
      use_remote_tpu: If true, input pipeline ops are placed in TPU worker host
        as an optimization.
136
      custom_callbacks: A list of Keras Callbacks objects to run during
137
        training. More specifically, `on_batch_begin()`, `on_batch_end()`,
138
        methods are invoked during training.
139
140
      run_eagerly: Whether to run model training in pure eager execution. This
        should be disable for TPUStrategy.
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159

  Returns:
      Trained model.

  Raises:
      ValueError: (1) When the model returned by `model_fn` does not have an
        optimizer attribute, or when required parameters are set to None. (2)
        When eval args are
        not specified correctly. (3) metric_fn must be a callable if specified.
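
  Example:
      A minimal sketch of a typical call; `my_model_fn`, `my_loss_fn` and
      `my_train_input_fn` are placeholders for user-provided functions, not
      part of this module:

      strategy = tf.distribute.MirroredStrategy()
      trained_model = run_customized_training_loop(
          strategy=strategy,
          model_fn=my_model_fn,
          loss_fn=my_loss_fn,
          model_dir='/tmp/model_dir',
          train_input_fn=my_train_input_fn,
          steps_per_epoch=1000,
          steps_per_loop=200,
          epochs=3)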
  """

  if _sentinel is not None:
    raise ValueError('only call `run_customized_training_loop()` '
                     'with named arguments.')

  required_arguments = [
      strategy, model_fn, loss_fn, model_dir, steps_per_epoch, train_input_fn
  ]
  if [arg for arg in required_arguments if arg is None]:
    raise ValueError('`strategy`, `model_fn`, `loss_fn`, `model_dir`, '
                     '`steps_per_epoch` and `train_input_fn` are required '
                     'parameters.')
  if steps_per_loop > steps_per_epoch:
    logging.error(
        'steps_per_loop: %d is greater than steps_per_epoch: %d; using '
        'steps_per_epoch as steps_per_loop.', steps_per_loop, steps_per_epoch)
    steps_per_loop = steps_per_epoch
  assert tf.executing_eagerly()

  if run_eagerly:
    if steps_per_loop > 1:
      raise ValueError(
          'steps_per_loop is used for performance optimization. When you want '
          'to run eagerly, you cannot leverage graph mode loop.')
    if isinstance(strategy, tf.distribute.experimental.TPUStrategy):
      raise ValueError(
          'TPUStrategy should not run eagerly as it heavily relies on graph'
          ' optimization for the distributed system.')

  if eval_input_fn and (eval_steps is None or metric_fn is None):
    raise ValueError(
        '`eval_steps` and `metric_fn` are required when `eval_input_fn` '
        'is not None.')
  if metric_fn and not callable(metric_fn):
    raise ValueError(
        'If `metric_fn` is specified, it must be a callable.')

  total_training_steps = steps_per_epoch * epochs

  # To reduce unnecessary send/receive input pipeline operations, we place
  # input pipeline ops on the worker task.
  with tf.device(tpu_lib.get_primary_cpu_task(use_remote_tpu)):
    train_iterator = _get_input_iterator(train_input_fn, strategy)

    with distribution_utils.get_strategy_scope(strategy):
      # To correctly place the model weights on accelerators,
      # model and optimizer should be created in scope.
      model, sub_model = model_fn()
      if not hasattr(model, 'optimizer'):
        raise ValueError('User should set the optimizer attribute on the '
                         'model inside `model_fn`.')
      optimizer = model.optimizer
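      # Mixed-precision training wraps the optimizer in a LossScaleOptimizer;
      # its presence determines whether loss scaling is applied below.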
      use_float16 = isinstance(
          optimizer, tf.keras.mixed_precision.experimental.LossScaleOptimizer)

      if init_checkpoint:
        logging.info(
            'Checkpoint file %s found and restoring from '
            'initial checkpoint for core model.', init_checkpoint)
        checkpoint = tf.train.Checkpoint(model=sub_model)
        checkpoint.restore(init_checkpoint).assert_consumed()
        logging.info('Loading from checkpoint file completed')

      train_loss_metric = tf.keras.metrics.Mean(
          'training_loss', dtype=tf.float32)
      eval_metrics = [metric_fn()] if metric_fn else []
      # If evaluation is required, make a copy of each metric: the originals
      # are used for evaluation and the copies for training.
      train_metrics = [
          metric.__class__.from_config(metric.get_config())
          for metric in eval_metrics
      ]

      # Create summary writers
      eval_summary_writer = tf.summary.create_file_writer(
          os.path.join(model_dir, 'summaries/eval'))
      if steps_per_loop >= _MIN_SUMMARY_STEPS:
        # Only write summaries when the stats have been collected over
        # sufficiently many steps.
        train_summary_writer = tf.summary.create_file_writer(
            os.path.join(model_dir, 'summaries/train'))
      else:
        train_summary_writer = None

      def _replicated_step(inputs):
        """Replicated training step."""

        inputs, labels = inputs
        with tf.GradientTape() as tape:
          model_outputs = model(inputs, training=True)
          loss = loss_fn(labels, model_outputs)
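          # With mixed precision, scale the loss to avoid fp16 gradient
          # underflow; the gradients are unscaled again before being applied.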
          if use_float16:
            scaled_loss = optimizer.get_scaled_loss(loss)

        # De-dupes variables due to keras tracking issues.
        tvars = list(
            object_identity.ObjectIdentitySet(model.trainable_variables))
        if use_float16:
          scaled_grads = tape.gradient(scaled_loss, tvars)
          grads = optimizer.get_unscaled_gradients(scaled_grads)
        else:
          grads = tape.gradient(loss, tvars)
        optimizer.apply_gradients(zip(grads, tvars))
        # For reporting, the metric takes the mean of losses.
        train_loss_metric.update_state(loss)
        for metric in train_metrics:
          metric.update_state(labels, model_outputs)

      @tf.function
      def train_steps(iterator, steps):
        """Performs distributed training steps in a loop.

        Args:
          iterator: the distributed iterator of training datasets.
          steps: a tf.int32 tensor specifying the number of steps to run
            inside the host training loop.

        Raises:
          ValueError: Any of the arguments or tensor shapes are invalid.
        """
        if not isinstance(steps, tf.Tensor):
          raise ValueError('steps should be a Tensor. Passing a Python object '
                           'may cause retracing.')

        for _ in tf.range(steps):
          strategy.experimental_run_v2(_replicated_step, args=(next(iterator),))

      def train_single_step(iterator):
        """Performs a distributed training step.

        Args:
          iterator: the distributed iterator of training datasets.

        Raises:
          ValueError: Any of the arguments or tensor shapes are invalid.
        """
        strategy.experimental_run_v2(_replicated_step, args=(next(iterator),))

      def test_step(iterator):
        """Calculates evaluation metrics on distributed devices."""

        def _test_step_fn(inputs):
          """Replicated accuracy calculation."""

          inputs, labels = inputs
          model_outputs = model(inputs, training=False)
          for metric in eval_metrics:
            metric.update_state(labels, model_outputs)

        strategy.experimental_run_v2(_test_step_fn, args=(next(iterator),))

      if not run_eagerly:
        train_single_step = tf.function(train_single_step)
        test_step = tf.function(test_step)

      def _run_evaluation(current_training_step, test_iterator):
        """Runs validation steps and aggregate metrics."""
        for _ in range(eval_steps):
          test_step(test_iterator)

        with eval_summary_writer.as_default():
          for metric in eval_metrics + model.metrics:
            metric_value = _float_metric_value(metric)
            logging.info('Step: [%d] Validation %s = %f', current_training_step,
                         metric.name, metric_value)
            tf.summary.scalar(
                metric.name, metric_value, step=current_training_step)
          eval_summary_writer.flush()

      def _run_callbacks_on_batch_begin(batch):
        """Runs custom callbacks at the start of every step."""
        if not custom_callbacks:
          return
        for callback in custom_callbacks:
          callback.on_batch_begin(batch)

      def _run_callbacks_on_batch_end(batch):
        """Runs custom callbacks at the end of every step."""
        if not custom_callbacks:
          return
        for callback in custom_callbacks:
          callback.on_batch_end(batch)
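
      # A hypothetical callback (illustrative only; assumes `import time` at
      # module level) that these hooks would drive:
      #
      #   class StepTimer(tf.keras.callbacks.Callback):
      #
      #     def on_batch_begin(self, batch, logs=None):
      #       self._start = time.time()
      #
      #     def on_batch_end(self, batch, logs=None):
      #       logging.info('Step %d took %.3fs', batch,
      #                    time.time() - self._start)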

      # Training loop starts here.
      checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
      latest_checkpoint_file = tf.train.latest_checkpoint(model_dir)
      if latest_checkpoint_file:
        logging.info(
            'Checkpoint file %s found and restoring from '
            'checkpoint', latest_checkpoint_file)
        checkpoint.restore(latest_checkpoint_file)
        logging.info('Loading from checkpoint file completed')

      current_step = optimizer.iterations.numpy()
      checkpoint_name = 'ctl_step_{step}.ckpt'

      while current_step < total_training_steps:
        # Training loss/metrics are averaged over the steps of the inner
        # (micro) training loop, so we reset their values before each round.
        train_loss_metric.reset_states()
        for metric in train_metrics + model.metrics:
          metric.reset_states()

        _run_callbacks_on_batch_begin(current_step)
        # Runs several steps in the host while loop.
        steps = _steps_to_run(current_step, steps_per_epoch, steps_per_loop)

        if steps == 1:
          # TODO(zongweiz): merge with train_steps once tf.while_loop
          # GPU performance bugs are fixed.
          train_single_step(train_iterator)
        else:
          # Converts steps to a Tensor to avoid tf.function retracing.
          train_steps(train_iterator,
                      tf.convert_to_tensor(steps, dtype=tf.int32))
        _run_callbacks_on_batch_end(current_step)
        current_step += steps

        train_loss = _float_metric_value(train_loss_metric)
        # Updates training logging.
        training_status = 'Train Step: %d/%d / loss = %s' % (
            current_step, total_training_steps, train_loss)

        if train_summary_writer:
          with train_summary_writer.as_default():
            tf.summary.scalar(
                train_loss_metric.name, train_loss, step=current_step)
            for metric in train_metrics + model.metrics:
              metric_value = _float_metric_value(metric)
              training_status += '  %s = %f' % (metric.name, metric_value)
              tf.summary.scalar(metric.name, metric_value, step=current_step)
            train_summary_writer.flush()
        logging.info(training_status)

        # Saves model checkpoints and run validation steps at every epoch end.
        if current_step % steps_per_epoch == 0:
          # To avoid repeated model saving, we do not save after the last
          # step of training.
          if current_step < total_training_steps:
            _save_checkpoint(checkpoint, model_dir,
                             checkpoint_name.format(step=current_step))

          if eval_input_fn:
            logging.info('Running evaluation after step: %s.', current_step)
            _run_evaluation(current_step,
                            _get_input_iterator(eval_input_fn, strategy))
            # Re-initialize evaluation metrics.
            for metric in eval_metrics + model.metrics:
              metric.reset_states()

      _save_checkpoint(checkpoint, model_dir,
                       checkpoint_name.format(step=current_step))

      if eval_input_fn:
        logging.info('Running final evaluation after training is complete.')
        _run_evaluation(current_step,
                        _get_input_iterator(eval_input_fn, strategy))

      training_summary = {
          'total_training_steps': total_training_steps,
          'train_loss': _float_metric_value(train_loss_metric),
      }
      if eval_metrics:
        # TODO(hongkuny): Cleans up summary reporting in text.
        training_summary['last_train_metrics'] = _float_metric_value(
            train_metrics[0])
        training_summary['eval_metrics'] = _float_metric_value(eval_metrics[0])

      _write_txt_summary(training_summary, model_dir)

      return model