# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BERT classification finetuning runner in TF 2.x."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import json
import math
import os

from absl import app
from absl import flags
from absl import logging
import tensorflow as tf

from official.modeling import model_training_utils
from official.nlp import optimization
from official.nlp.bert import bert_models
from official.nlp.bert import common_flags
from official.nlp.bert import configs as bert_configs
from official.nlp.bert import input_pipeline
from official.nlp.bert import model_saving_utils
from official.utils.misc import distribution_utils
from official.utils.misc import keras_utils


flags.DEFINE_enum(
    'mode', 'train_and_eval', ['train_and_eval', 'export_only'],
    'One of {"train_and_eval", "export_only"}. `train_and_eval`: '
    'trains the model and evaluates in the meantime. '
    '`export_only`: will take the latest checkpoint inside '
    'model_dir and export a `SavedModel`.')
flags.DEFINE_string('train_data_path', None,
                    'Path to training data for BERT classifier.')
flags.DEFINE_string('eval_data_path', None,
                    'Path to evaluation data for BERT classifier.')
# Model training specific flags.
flags.DEFINE_string(
    'input_meta_data_path', None,
    'Path to a file that contains metadata about the input '
    'to be used for training and evaluation.')
flags.DEFINE_integer('train_batch_size', 32, 'Batch size for training.')
flags.DEFINE_integer('eval_batch_size', 32, 'Batch size for evaluation.')

common_flags.define_common_bert_flags()
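
# A typical `train_and_eval` run might look like this (paths are hypothetical;
# flags such as --bert_config_file and --model_dir come from
# common_flags.define_common_bert_flags()):
#
#   python run_classifier.py \
#     --mode=train_and_eval \
#     --input_meta_data_path=/tmp/meta_data \
#     --train_data_path=/tmp/train.tf_record \
#     --eval_data_path=/tmp/eval.tf_record \
#     --bert_config_file=/tmp/bert_config.json \
#     --model_dir=/tmp/bert20/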

FLAGS = flags.FLAGS


def get_loss_fn(num_classes, loss_factor=1.0):
  """Gets the classification loss function."""

  def classification_loss_fn(labels, logits):
    """Classification loss."""
    labels = tf.squeeze(labels)
    log_probs = tf.nn.log_softmax(logits, axis=-1)
    one_hot_labels = tf.one_hot(
        tf.cast(labels, dtype=tf.int32), depth=num_classes, dtype=tf.float32)
    per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
    loss = tf.reduce_mean(per_example_loss)
    loss *= loss_factor
    return loss

  return classification_loss_fn
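
# A minimal sketch of the returned loss closure (hypothetical values, kept as
# a comment so the script's import-time behavior is unchanged):
#
#   loss_fn = get_loss_fn(num_classes=3)
#   logits = tf.constant([[2.0, 0.5, -1.0]])  # [batch_size, num_classes]
#   labels = tf.constant([0])                 # sparse integer class ids
#   loss_fn(labels, logits)                   # scalar mean cross-entropy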


def get_dataset_fn(input_file_pattern, max_seq_length, global_batch_size,
                   is_training):
  """Gets a closure to create a dataset."""

  def _dataset_fn(ctx=None):
    """Returns tf.data.Dataset for distributed BERT pretraining."""
    batch_size = ctx.get_per_replica_batch_size(
        global_batch_size) if ctx else global_batch_size
    dataset = input_pipeline.create_classifier_dataset(
        input_file_pattern,
        max_seq_length,
        batch_size,
        is_training=is_training,
        input_pipeline_context=ctx)
    return dataset

  return _dataset_fn
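
# Usage sketch (hypothetical file pattern; in this script the closure is
# handed to the training utilities rather than called directly):
#
#   train_fn = get_dataset_fn('/tmp/train.tf_record', max_seq_length=128,
#                             global_batch_size=32, is_training=True)
#   dataset = train_fn()  # tf.data.Dataset yielding batched features/labels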


def run_bert_classifier(strategy,
                        bert_config,
                        input_meta_data,
                        model_dir,
                        epochs,
                        steps_per_epoch,
                        steps_per_loop,
                        eval_steps,
                        warmup_steps,
                        initial_lr,
                        init_checkpoint,
                        train_input_fn,
                        eval_input_fn,
                        custom_callbacks=None,
                        run_eagerly=False,
                        use_keras_compile_fit=False):
  """Run BERT classifier training using low-level API."""
  max_seq_length = input_meta_data['max_seq_length']
  num_classes = input_meta_data['num_labels']

  def _get_classifier_model():
    """Gets a classifier model."""
    classifier_model, core_model = (
        bert_models.classifier_model(
            bert_config,
            num_classes,
            max_seq_length,
            hub_module_url=FLAGS.hub_module_url,
            hub_module_trainable=FLAGS.hub_module_trainable))
    classifier_model.optimizer = optimization.create_optimizer(
        initial_lr, steps_per_epoch * epochs, warmup_steps)
    if FLAGS.fp16_implementation == 'graph_rewrite':
      # Note: when FLAGS.fp16_implementation == "graph_rewrite", the Keras
      # dtype policy stays 'float32', which ensures that Keras mixed precision
      # and tf.train.experimental.enable_mixed_precision_graph_rewrite do not
      # both apply mixed precision.
      classifier_model.optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(
          classifier_model.optimizer)
    return classifier_model, core_model

  # During distributed training, the loss used for gradient computation is
  # summed across all replicas. When the Keras compile/fit() API is used,
  # fit() internally normalizes the loss by dividing it by the number of
  # replicas used for computation. With a custom training loop, this
  # normalization is not done automatically and must be applied manually
  # by the end user.
  loss_multiplier = 1.0
  if FLAGS.scale_loss and not use_keras_compile_fit:
    loss_multiplier = 1.0 / strategy.num_replicas_in_sync

  loss_fn = get_loss_fn(num_classes, loss_factor=loss_multiplier)
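  # For example, with 8 replicas and scale_loss enabled in the custom-loop
  # path, each replica's mean loss is multiplied by 1/8, so summing gradients
  # across replicas recovers the gradient of the global mean loss.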

  # Defines evaluation metrics function, which will create metrics in the
  # correct device and strategy scope.
  def metric_fn():
    return tf.keras.metrics.SparseCategoricalAccuracy(
        'test_accuracy', dtype=tf.float32)

  if use_keras_compile_fit:
    # Start training using Keras compile/fit API.
    logging.info('Training using TF 2.0 Keras compile/fit API with '
                 'distribution strategy.')
    return run_keras_compile_fit(
        model_dir,
        strategy,
        _get_classifier_model,
        train_input_fn,
        eval_input_fn,
        loss_fn,
        metric_fn,
        init_checkpoint,
        epochs,
        steps_per_epoch,
        eval_steps,
        custom_callbacks=custom_callbacks)

  # Use user-defined loop to start training.
  logging.info('Training using a customized TF 2.0 training loop with '
               'distribution strategy.')
  return model_training_utils.run_customized_training_loop(
      strategy=strategy,
      model_fn=_get_classifier_model,
      loss_fn=loss_fn,
      model_dir=model_dir,
      steps_per_epoch=steps_per_epoch,
      steps_per_loop=steps_per_loop,
      epochs=epochs,
      train_input_fn=train_input_fn,
      eval_input_fn=eval_input_fn,
      eval_steps=eval_steps,
      init_checkpoint=init_checkpoint,
      metric_fn=metric_fn,
      custom_callbacks=custom_callbacks,
      run_eagerly=run_eagerly)


def run_keras_compile_fit(model_dir,
                          strategy,
                          model_fn,
                          train_input_fn,
                          eval_input_fn,
                          loss_fn,
                          metric_fn,
                          init_checkpoint,
                          epochs,
                          steps_per_epoch,
                          eval_steps,
                          custom_callbacks=None):
  """Runs BERT classifier model using Keras compile/fit API."""

  with strategy.scope():
    training_dataset = train_input_fn()
    evaluation_dataset = eval_input_fn()
    bert_model, sub_model = model_fn()
    optimizer = bert_model.optimizer

    if init_checkpoint:
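      # Restore only the core (sub) model weights from the initial checkpoint;
      # the freshly initialized classification head is left untouched.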
      checkpoint = tf.train.Checkpoint(model=sub_model)
      checkpoint.restore(init_checkpoint).assert_existing_objects_matched()

    bert_model.compile(optimizer=optimizer, loss=loss_fn, metrics=[metric_fn()])

    summary_dir = os.path.join(model_dir, 'summaries')
    summary_callback = tf.keras.callbacks.TensorBoard(summary_dir)
    checkpoint_path = os.path.join(model_dir, 'checkpoint')
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        checkpoint_path, save_weights_only=True)

    if custom_callbacks is not None:
      custom_callbacks += [summary_callback, checkpoint_callback]
    else:
      custom_callbacks = [summary_callback, checkpoint_callback]

    bert_model.fit(
        x=training_dataset,
        validation_data=evaluation_dataset,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        validation_steps=eval_steps,
        callbacks=custom_callbacks)

    return bert_model


def export_classifier(model_export_path, input_meta_data,
                      restore_model_using_load_weights,
                      bert_config, model_dir):
  """Exports a trained model as a `SavedModel` for inference.

  Args:
    model_export_path: a string specifying the path to the SavedModel directory.
    input_meta_data: dictionary containing meta data about input and model.
    restore_model_using_load_weights: Whether to use the checkpoint.restore()
      API for custom checkpoints or the model.load_weights() API. There are
      two different ways to save checkpoints: one uses tf.train.Checkpoint
      and the other uses Keras model.save_weights(). The custom training loop
      implementation uses the tf.train.Checkpoint API, while the Keras
      ModelCheckpoint callback internally uses the model.save_weights() API.
      Since these two APIs cannot be used together, the model loading logic
      must take into account how the model checkpoint was saved.
    bert_config: A BertConfig object defining the core BERT layers.
    model_dir: The directory where the model weights and training/evaluation
      summaries are stored.

  Raises:
    ValueError: If the export path or model directory is not specified.
  """
  if not model_export_path:
    raise ValueError('Export path is not specified: %s' % model_export_path)
  if not model_dir:
    raise ValueError('Model directory is not specified: %s' % model_dir)

  # Export uses float32 for now, even if training uses mixed precision.
  tf.keras.mixed_precision.experimental.set_policy('float32')
  classifier_model = bert_models.classifier_model(
      bert_config, input_meta_data['num_labels'],
      input_meta_data['max_seq_length'])[0]

  model_saving_utils.export_bert_model(
      model_export_path,
      model=classifier_model,
      checkpoint_dir=model_dir,
      restore_model_using_load_weights=restore_model_using_load_weights)
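
# Export-only invocation sketch (hypothetical paths; per the `mode` flag, the
# latest checkpoint in --model_dir is exported as a SavedModel):
#
#   python run_classifier.py \
#     --mode=export_only \
#     --input_meta_data_path=/tmp/meta_data \
#     --bert_config_file=/tmp/bert_config.json \
#     --model_dir=/tmp/bert20/ \
#     --model_export_path=/tmp/bert20/export/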


def run_bert(strategy,
             input_meta_data,
             model_config,
             train_input_fn=None,
             eval_input_fn=None):
  """Run BERT training."""
  if FLAGS.mode == 'export_only':
    # As Keras ModelCheckpoint callback used with Keras compile/fit() API
    # internally uses model.save_weights() to save checkpoints, we must
    # use model.load_weights() when Keras compile/fit() is used.
    export_classifier(FLAGS.model_export_path, input_meta_data,
                      FLAGS.use_keras_compile_fit,
                      model_config, FLAGS.model_dir)
    return

  if FLAGS.mode != 'train_and_eval':
    raise ValueError('Unsupported mode is specified: %s' % FLAGS.mode)
  # Enables XLA in Session Config. Should not be set for TPU.
  keras_utils.set_config_v2(FLAGS.enable_xla)

  epochs = FLAGS.num_train_epochs
  train_data_size = input_meta_data['train_data_size']
  steps_per_epoch = int(train_data_size / FLAGS.train_batch_size)
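  # Warm up over the first 10% of total training steps.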
  warmup_steps = int(epochs * train_data_size * 0.1 / FLAGS.train_batch_size)
  eval_steps = int(
      math.ceil(input_meta_data['eval_data_size'] / FLAGS.eval_batch_size))

  if not strategy:
    raise ValueError('Distribution strategy has not been specified.')

  trained_model = run_bert_classifier(
      strategy,
      model_config,
      input_meta_data,
      FLAGS.model_dir,
      epochs,
      steps_per_epoch,
      FLAGS.steps_per_loop,
      eval_steps,
      warmup_steps,
      FLAGS.learning_rate,
      FLAGS.init_checkpoint,
      train_input_fn,
      eval_input_fn,
      run_eagerly=FLAGS.run_eagerly,
      use_keras_compile_fit=FLAGS.use_keras_compile_fit)

  if FLAGS.model_export_path:
    # As Keras ModelCheckpoint callback used with Keras compile/fit() API
    # internally uses model.save_weights() to save checkpoints, we must
    # use model.load_weights() when Keras compile/fit() is used.
    model_saving_utils.export_bert_model(
        FLAGS.model_export_path,
        model=trained_model,
        restore_model_using_load_weights=FLAGS.use_keras_compile_fit)
  return trained_model


def main(_):
  # Users should always run this script under TF 2.x
  assert tf.version.VERSION.startswith('2.')

  with tf.io.gfile.GFile(FLAGS.input_meta_data_path, 'rb') as reader:
    input_meta_data = json.loads(reader.read().decode('utf-8'))

  if not FLAGS.model_dir:
    FLAGS.model_dir = '/tmp/bert20/'

  strategy = distribution_utils.get_distribution_strategy(
      distribution_strategy=FLAGS.distribution_strategy,
      num_gpus=FLAGS.num_gpus,
      tpu_address=FLAGS.tpu)
  max_seq_length = input_meta_data['max_seq_length']
  train_input_fn = get_dataset_fn(
      FLAGS.train_data_path,
      max_seq_length,
      FLAGS.train_batch_size,
      is_training=True)
  eval_input_fn = get_dataset_fn(
      FLAGS.eval_data_path,
      max_seq_length,
      FLAGS.eval_batch_size,
      is_training=False)

  bert_config = bert_configs.BertConfig.from_json_file(FLAGS.bert_config_file)
  run_bert(strategy, input_meta_data, bert_config, train_input_fn,
           eval_input_fn)


if __name__ == '__main__':
  flags.mark_flag_as_required('bert_config_file')
  flags.mark_flag_as_required('input_meta_data_path')
  flags.mark_flag_as_required('model_dir')
  app.run(main)