bert_models.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""BERT models that are compatible with TF 2.0."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf
import tensorflow_hub as hub

from official.modeling import tf_utils
from official.nlp.modeling import losses
from official.nlp.modeling import networks
from official.nlp.modeling.networks import bert_classifier
from official.nlp.modeling.networks import bert_pretrainer
from official.nlp.modeling.networks import bert_span_labeler


class BertPretrainLossAndMetricLayer(tf.keras.layers.Layer):
  """Returns layer that computes custom loss and metrics for pretraining."""

  def __init__(self, vocab_size, **kwargs):
    super(BertPretrainLossAndMetricLayer, self).__init__(**kwargs)
    self._vocab_size = vocab_size
    self.config = {
        'vocab_size': vocab_size,
    }

  def __call__(self,
               lm_output,
               sentence_output=None,
               lm_label_ids=None,
               lm_label_weights=None,
               sentence_labels=None,
               **kwargs):
    inputs = tf_utils.pack_inputs([
        lm_output, sentence_output, lm_label_ids, lm_label_weights,
        sentence_labels
    ])
    return super(BertPretrainLossAndMetricLayer,
                 self).__call__(inputs, **kwargs)

  def _add_metrics(self, lm_output, lm_labels, lm_label_weights,
                   lm_example_loss, sentence_output, sentence_labels,
                   next_sentence_loss):
    """Adds metrics."""
    masked_lm_accuracy = tf.keras.metrics.sparse_categorical_accuracy(
        lm_labels, lm_output)
    numerator = tf.reduce_sum(masked_lm_accuracy * lm_label_weights)
    denominator = tf.reduce_sum(lm_label_weights) + 1e-5
    masked_lm_accuracy = numerator / denominator
    self.add_metric(
        masked_lm_accuracy, name='masked_lm_accuracy', aggregation='mean')

    self.add_metric(lm_example_loss, name='lm_example_loss', aggregation='mean')

    next_sentence_accuracy = tf.keras.metrics.sparse_categorical_accuracy(
        sentence_labels, sentence_output)
    self.add_metric(
        next_sentence_accuracy,
        name='next_sentence_accuracy',
        aggregation='mean')

    self.add_metric(
        next_sentence_loss, name='next_sentence_loss', aggregation='mean')

  def call(self, inputs):
    """Implements call() for the layer."""
    unpacked_inputs = tf_utils.unpack_inputs(inputs)
    lm_output = unpacked_inputs[0]
    sentence_output = unpacked_inputs[1]
    lm_label_ids = unpacked_inputs[2]
    lm_label_weights = tf.keras.backend.cast(unpacked_inputs[3], tf.float32)
    sentence_labels = unpacked_inputs[4]

    mask_label_loss = losses.weighted_sparse_categorical_crossentropy_loss(
        labels=lm_label_ids, predictions=lm_output, weights=lm_label_weights)
    sentence_loss = losses.weighted_sparse_categorical_crossentropy_loss(
        labels=sentence_labels, predictions=sentence_output)
    loss = mask_label_loss + sentence_loss
    batch_shape = tf.slice(tf.keras.backend.shape(sentence_labels), [0], [1])
    # TODO(hongkuny): Avoid this hack and switch to add_loss.
    final_loss = tf.fill(batch_shape, loss)

    self._add_metrics(lm_output, lm_label_ids, lm_label_weights,
                      mask_label_loss, sentence_output, sentence_labels,
                      sentence_loss)
    return final_loss
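

# Example (illustrative sketch, not part of the original module): wiring the
# loss layer to pretrainer outputs. All tensors below are assumed to come from
# a `BertPretrainer` call and the pretraining input pipeline.
#
#   loss_layer = BertPretrainLossAndMetricLayer(vocab_size=30522)
#   per_example_loss = loss_layer(lm_output, sentence_output, masked_lm_ids,
#                                 masked_lm_weights, next_sentence_labels)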


def _get_transformer_encoder(bert_config,
                             sequence_length,
                             float_dtype=tf.float32):
  """Gets a 'TransformerEncoder' object.

  Args:
    bert_config: A 'modeling.BertConfig' object.
    sequence_length: Maximum sequence length of the training data.
    float_dtype: tf.dtype, tf.float32 or tf.float16.

  Returns:
    A networks.TransformerEncoder object.
  """
  return networks.TransformerEncoder(
      vocab_size=bert_config.vocab_size,
      hidden_size=bert_config.hidden_size,
      num_layers=bert_config.num_hidden_layers,
      num_attention_heads=bert_config.num_attention_heads,
      intermediate_size=bert_config.intermediate_size,
      activation=tf_utils.get_activation('gelu'),
      dropout_rate=bert_config.hidden_dropout_prob,
      attention_dropout_rate=bert_config.attention_probs_dropout_prob,
      sequence_length=sequence_length,
      max_sequence_length=bert_config.max_position_embeddings,
      type_vocab_size=bert_config.type_vocab_size,
      initializer=tf.keras.initializers.TruncatedNormal(
          stddev=bert_config.initializer_range),
      float_dtype=float_dtype.name)
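

# Example (minimal sketch; `bert_config` is a `BertConfig` instance built
# elsewhere in the library, and the sequence length is a placeholder): a config
# with hidden_size=768 and num_hidden_layers=12 yields a BERT-Base sized
# encoder.
#
#   encoder = _get_transformer_encoder(bert_config, sequence_length=128)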


def pretrain_model(bert_config,
                   seq_length,
                   max_predictions_per_seq,
                   initializer=None):
  """Returns model to be used for pre-training.

  Args:
      bert_config: Configuration that defines the core BERT model.
      seq_length: Maximum sequence length of the training data.
      max_predictions_per_seq: Maximum number of tokens in sequence to mask out
        and use for pretraining.
      initializer: Initializer for weights in BertPretrainer.

  Returns:
      Pretraining model as well as core BERT submodel from which to save
      weights after pretraining.
  """
  input_word_ids = tf.keras.layers.Input(
      shape=(seq_length,), name='input_word_ids', dtype=tf.int32)
  input_mask = tf.keras.layers.Input(
      shape=(seq_length,), name='input_mask', dtype=tf.int32)
  input_type_ids = tf.keras.layers.Input(
      shape=(seq_length,), name='input_type_ids', dtype=tf.int32)
  masked_lm_positions = tf.keras.layers.Input(
      shape=(max_predictions_per_seq,),
      name='masked_lm_positions',
      dtype=tf.int32)
  masked_lm_ids = tf.keras.layers.Input(
      shape=(max_predictions_per_seq,), name='masked_lm_ids', dtype=tf.int32)
  masked_lm_weights = tf.keras.layers.Input(
      shape=(max_predictions_per_seq,),
      name='masked_lm_weights',
      dtype=tf.int32)
  next_sentence_labels = tf.keras.layers.Input(
      shape=(1,), name='next_sentence_labels', dtype=tf.int32)

  transformer_encoder = _get_transformer_encoder(bert_config, seq_length)
  if initializer is None:
    initializer = tf.keras.initializers.TruncatedNormal(
        stddev=bert_config.initializer_range)
  pretrainer_model = bert_pretrainer.BertPretrainer(
      network=transformer_encoder,
      num_classes=2,  # The next sentence prediction label has two classes.
      num_token_predictions=max_predictions_per_seq,
      initializer=initializer,
      output='predictions')

  lm_output, sentence_output = pretrainer_model(
      [input_word_ids, input_mask, input_type_ids, masked_lm_positions])

  pretrain_loss_layer = BertPretrainLossAndMetricLayer(
      vocab_size=bert_config.vocab_size)
  output_loss = pretrain_loss_layer(lm_output, sentence_output, masked_lm_ids,
                                    masked_lm_weights, next_sentence_labels)
  keras_model = tf.keras.Model(
      inputs={
          'input_word_ids': input_word_ids,
          'input_mask': input_mask,
          'input_type_ids': input_type_ids,
          'masked_lm_positions': masked_lm_positions,
          'masked_lm_ids': masked_lm_ids,
          'masked_lm_weights': masked_lm_weights,
          'next_sentence_labels': next_sentence_labels,
      },
      outputs=output_loss)
  return keras_model, transformer_encoder
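

# Example (minimal sketch; `config` is a `BertConfig` built elsewhere and the
# hyperparameters are placeholders). Because the Keras model's output already
# *is* the per-example loss, a pass-through loss function suffices:
#
#   pretraining_model, encoder = pretrain_model(
#       config, seq_length=128, max_predictions_per_seq=20)
#   pretraining_model.compile(
#       optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
#       loss=lambda _, per_example_loss: tf.reduce_mean(per_example_loss))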


class BertSquadLogitsLayer(tf.keras.layers.Layer):
  """Returns a layer that computes custom logits for BERT squad model."""

  def __init__(self, initializer=None, float_type=tf.float32, **kwargs):
    super(BertSquadLogitsLayer, self).__init__(**kwargs)
    self.initializer = initializer
    self.float_type = float_type

  def build(self, unused_input_shapes):
    """Implements build() for the layer."""
    self.final_dense = tf.keras.layers.Dense(
        units=2, kernel_initializer=self.initializer, name='final_dense')
    super(BertSquadLogitsLayer, self).build(unused_input_shapes)

  def call(self, inputs):
    """Implements call() for the layer."""
    sequence_output = inputs

    input_shape = sequence_output.shape.as_list()
    sequence_length = input_shape[1]
    num_hidden_units = input_shape[2]

    final_hidden_input = tf.keras.backend.reshape(sequence_output,
                                                  [-1, num_hidden_units])
    logits = self.final_dense(final_hidden_input)
    logits = tf.keras.backend.reshape(logits, [-1, sequence_length, 2])
    logits = tf.transpose(logits, [2, 0, 1])
    unstacked_logits = tf.unstack(logits, axis=0)
    if self.float_type == tf.float16:
      unstacked_logits = tf.cast(unstacked_logits, tf.float32)
    return unstacked_logits[0], unstacked_logits[1]
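

# Example (illustrative only): the layer maps a [batch, seq_length, hidden]
# sequence output to a pair of [batch, seq_length] start/end logit tensors.
# The stddev value below is a placeholder.
#
#   logits_layer = BertSquadLogitsLayer(
#       initializer=tf.keras.initializers.TruncatedNormal(stddev=0.02))
#   start_logits, end_logits = logits_layer(sequence_output)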


def squad_model(bert_config,
                max_seq_length,
                float_type,
                initializer=None,
                hub_module_url=None):
  """Returns BERT Squad model along with core BERT model to import weights.

  Args:
    bert_config: BertConfig, the config defines the core Bert model.
    max_seq_length: integer, the maximum input sequence length.
    float_type: tf.dtype, tf.float32 or tf.bfloat16.
    initializer: Initializer for the final dense layer in the span labeler.
      Defaults to a TruncatedNormal initializer.
    hub_module_url: TF-Hub path/URL to the BERT module.

  Returns:
    A tuple of (1) keras model that outputs start logits and end logits and
    (2) the core BERT transformer encoder.
  """
  if initializer is None:
    initializer = tf.keras.initializers.TruncatedNormal(
        stddev=bert_config.initializer_range)
  if not hub_module_url:
    bert_encoder = _get_transformer_encoder(bert_config, max_seq_length,
                                            float_type)
    return bert_span_labeler.BertSpanLabeler(
        network=bert_encoder, initializer=initializer), bert_encoder

  input_word_ids = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids')
  input_mask = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_mask')
  input_type_ids = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids')
  core_model = hub.KerasLayer(hub_module_url, trainable=True)
  _, sequence_output = core_model(
      [input_word_ids, input_mask, input_type_ids])
  # Sets the shape manually due to a bug in TF shape inference.
  # TODO(hongkuny): remove this once shape inference is correct.
  sequence_output.set_shape((None, max_seq_length, bert_config.hidden_size))

  squad_logits_layer = BertSquadLogitsLayer(
      initializer=initializer, float_type=float_type, name='squad_logits')
  start_logits, end_logits = squad_logits_layer(sequence_output)

  squad = tf.keras.Model(
      inputs={
          'input_word_ids': input_word_ids,
          'input_mask': input_mask,
          'input_type_ids': input_type_ids,
      },
      outputs=[start_logits, end_logits],
      name='squad_model')
  return squad, core_model
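

# Example (minimal sketch; the config, length, and URL are placeholders):
#
#   squad, encoder = squad_model(config, max_seq_length=384,
#                                float_type=tf.float32)
#   # Or, backed by a TF-Hub module instead of a freshly built encoder:
#   squad, hub_core = squad_model(config, max_seq_length=384,
#                                 float_type=tf.float32,
#                                 hub_module_url='https://tfhub.dev/...')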


def classifier_model(bert_config,
                     float_type,
                     num_labels,
                     max_seq_length,
                     final_layer_initializer=None,
                     hub_module_url=None):
  """BERT classifier model in functional API style.

  Construct a Keras model for predicting `num_labels` outputs from an input with
  maximum sequence length `max_seq_length`.

  Args:
    bert_config: BertConfig, the config defines the core BERT model.
    float_type: dtype, tf.float32 or tf.bfloat16.
    num_labels: integer, the number of classes.
    max_seq_length: integer, the maximum input sequence length.
    final_layer_initializer: Initializer for the final dense layer. Defaults to
      a TruncatedNormal initializer.
    hub_module_url: TF-Hub path/URL to the BERT module.

  Returns:
    Combined prediction model (words, mask, type) -> (one-hot labels)
    BERT sub-model (words, mask, type) -> (bert_outputs)
  """
  if final_layer_initializer is not None:
    initializer = final_layer_initializer
  else:
    initializer = tf.keras.initializers.TruncatedNormal(
        stddev=bert_config.initializer_range)

  if not hub_module_url:
    bert_encoder = _get_transformer_encoder(bert_config, max_seq_length)
    return bert_classifier.BertClassifier(
        bert_encoder,
        num_classes=num_labels,
        dropout_rate=bert_config.hidden_dropout_prob,
        initializer=initializer), bert_encoder

  input_word_ids = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_word_ids')
  input_mask = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_mask')
  input_type_ids = tf.keras.layers.Input(
      shape=(max_seq_length,), dtype=tf.int32, name='input_type_ids')
  bert_model = hub.KerasLayer(hub_module_url, trainable=True)
  pooled_output, _ = bert_model([input_word_ids, input_mask, input_type_ids])
  output = tf.keras.layers.Dropout(rate=bert_config.hidden_dropout_prob)(
      pooled_output)

  output = tf.keras.layers.Dense(
      num_labels,
      kernel_initializer=initializer,
      name='output',
      dtype=float_type)(
          output)
  return tf.keras.Model(
      inputs={
          'input_word_ids': input_word_ids,
          'input_mask': input_mask,
          'input_type_ids': input_type_ids
      },
      outputs=output), bert_model
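

# Example (minimal sketch; `config` is a `BertConfig` built elsewhere, and the
# compile arguments assume the model emits logits, since the final dense layer
# above has no activation):
#
#   classifier, encoder = classifier_model(
#       config, float_type=tf.float32, num_labels=2, max_seq_length=128)
#   classifier.compile(
#       optimizer=tf.keras.optimizers.Adam(learning_rate=2e-5),
#       loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
#       metrics=['accuracy'])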