"official/legacy/bert/README.md" did not exist on "252e63849b5cf8dd12d6930dc2e9f8c51ea70251"
modeling_tf_roberta.py 19.5 KB
Newer Older
thomwolf's avatar
thomwolf committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 RoBERTa model. """

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import logging

import numpy as np
import tensorflow as tf

from .configuration_roberta import RobertaConfig
thomwolf's avatar
thomwolf committed
27
from .modeling_tf_utils import TFPreTrainedModel, get_initializer
thomwolf's avatar
thomwolf committed
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
from .file_utils import add_start_docstrings
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model

from .modeling_tf_bert import TFBertEmbeddings, TFBertMainLayer, gelu, gelu_new

logger = logging.getLogger(__name__)

# Map from pretrained-model shortcut name to the S3 URL of its TF 2.0 (.h5) weights.
# Consumed by TFRobertaPreTrainedModel.from_pretrained via pretrained_model_archive_map.
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'roberta-base': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-base-tf_model.h5",
    'roberta-large': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-tf_model.h5",
    'roberta-large-mnli': "https://s3.amazonaws.com/models.huggingface.co/bert/roberta-large-mnli-tf_model.h5",
}

def load_roberta_pt_weights_in_tf2(tf_model, pytorch_checkpoint_path):
    """Load PyTorch RoBERTa checkpoint weights into a TF 2.0 model.

    Runs a dummy forward pass first so that all Keras variables are created
    (TF 2.0 builds weights lazily), then copies the PyTorch weights over.

    Args:
        tf_model: the TF 2.0 RoBERTa model whose weights will be populated.
        pytorch_checkpoint_path: path to the PyTorch checkpoint file.

    Returns:
        The TF model with weights loaded from the PyTorch checkpoint.
    """
    # Dummy token-id batch (3 sequences of length 5) used only to build the network.
    inputs_list = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
    tf_inputs = tf.constant(inputs_list)
    # Forward pass for its side effect: instantiates every layer's variables.
    # (The original bound this to an unused local `tfo`; the result is not needed.)
    tf_model(tf_inputs, training=False)
    return load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path, tf_inputs=tf_inputs)


class TFRobertaEmbeddings(TFBertEmbeddings):
    """
    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
    """
    def __init__(self, config, **kwargs):
        super(TFRobertaEmbeddings, self).__init__(config, **kwargs)
        # RoBERTa reserves position index 1 for padding; real positions start after it.
        self.padding_idx = 1

    def _embedding(self, inputs, training=False):
        """Applies embedding based on inputs tensor, offsetting default position ids past the padding index."""
        input_ids, position_ids, token_type_ids = inputs

        if position_ids is None:
            # Default positions: padding_idx+1 .. padding_idx+seq_length, with a leading batch axis.
            seq_length = tf.shape(input_ids)[1]
            start = self.padding_idx + 1
            position_ids = tf.expand_dims(tf.range(start, seq_length + start, dtype=tf.int32), axis=0)

        return super(TFRobertaEmbeddings, self)._embedding(
            [input_ids, position_ids, token_type_ids], training=training)


class TFRobertaMainLayer(TFBertMainLayer):
    """
    Same as TFBertMainLayer but uses TFRobertaEmbeddings.
    """
    def __init__(self, config, **kwargs):
        super(TFRobertaMainLayer, self).__init__(config, **kwargs)
        self.embeddings = TFRobertaEmbeddings(config, name='embeddings')

    def call(self, inputs, **kwargs):
        # Extract input_ids whichever way the inputs were packed (dict, sequence, or bare tensor).
        if isinstance(inputs, dict):
            input_ids = inputs.get('input_ids')
        elif isinstance(inputs, (tuple, list)):
            input_ids = inputs[0]
        else:
            input_ids = inputs

        # RoBERTa expects every sequence to start with the <s> control token (id 0);
        # a non-zero sum over the first column means some sequence lacks it.
        if tf.not_equal(tf.reduce_sum(input_ids[:, 0]), 0):
            logger.warning("A sequence with no special tokens has been passed to the RoBERTa model. "
                           "This model requires special tokens in order to work. "
                           "Please specify add_special_tokens=True in your encoding.")

        return super(TFRobertaMainLayer, self).call(inputs, **kwargs)
thomwolf's avatar
thomwolf committed
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113


class TFRobertaPreTrainedModel(TFPreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """
    # Configuration class used to instantiate this model family.
    config_class = RobertaConfig
    # Shortcut-name -> weights-URL map used by `from_pretrained`.
    pretrained_model_archive_map = TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_MAP
    # Hook used to convert PyTorch checkpoints into this TF 2.0 model.
    load_pt_weights = load_roberta_pt_weights_in_tf2
    # Attribute name under which the base transformer lives in derived models.
    base_model_prefix = "roberta"


ROBERTA_START_DOCSTRING = r"""    The RoBERTa model was proposed in
    `RoBERTa: A Robustly Optimized BERT Pretraining Approach`_
    by Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer,
    Veselin Stoyanov. It is based on Google's BERT model released in 2018.
    
    It builds on BERT and modifies key hyperparameters, removing the next-sentence pretraining
    objective and training with much larger mini-batches and learning rates.
    
    This implementation is the same as BertModel with a tiny embeddings tweak as well as a setup for Roberta pretrained 
    models.

thomwolf's avatar
thomwolf committed
114
115
    This model is a tf.keras.Model `tf.keras.Model`_ sub-class. Use it as a regular TF 2.0 Keras Model and
    refer to the TF 2.0 documentation for all matter related to general usage and behavior.
thomwolf's avatar
thomwolf committed
116
117
118
119

    .. _`RoBERTa: A Robustly Optimized BERT Pretraining Approach`:
        https://arxiv.org/abs/1907.11692

thomwolf's avatar
thomwolf committed
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
    .. _`tf.keras.Model`:
        https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/keras/Model

    Note on the model inputs:
        TF 2.0 models accepts two formats as inputs:

            - having all inputs as keyword arguments (like PyTorch models), or
            - having all inputs as a list, tuple or dict in the first positional arguments.

        This second option is useful when using `tf.keras.Model.fit()` method which currently requires having all the tensors in the first argument of the model call function: `model(inputs)`.

        If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument :

        - a single Tensor with input_ids only and nothing else: `model(input_ids)`
        - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
            `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
        - a dictionary with one or several input Tensors associated to the input names given in the docstring:
            `model({'input_ids': input_ids, 'token_type_ids': token_type_ids})`
thomwolf's avatar
thomwolf committed
138
139
140
141
142
143
144
145
146

    Parameters:
        config (:class:`~pytorch_transformers.RobertaConfig`): Model configuration class with all the parameters of the 
            model. Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~pytorch_transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""

ROBERTA_INPUTS_DOCSTRING = r"""
    Inputs:
thomwolf's avatar
thomwolf committed
147
        **input_ids**: ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
thomwolf's avatar
thomwolf committed
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
            Indices of input sequence tokens in the vocabulary.
            To match pre-training, RoBERTa input sequence should be formatted with <s> and </s> tokens as follows:

            (a) For sequence pairs:

                ``tokens:         <s> Is this Jacksonville ? </s> </s> No it is not . </s>``

            (b) For single sequences:

                ``tokens:         <s> the dog is hairy . </s>``

            Fully encoded sequences or sequence pairs can be obtained using the RobertaTokenizer.encode function with 
            the ``add_special_tokens`` parameter set to ``True``.

            RoBERTa is a model with absolute position embeddings so it's usually advised to pad the inputs on
            the right rather than the left.

            See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
            :func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
thomwolf's avatar
thomwolf committed
167
        **attention_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
thomwolf's avatar
thomwolf committed
168
169
170
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
thomwolf's avatar
thomwolf committed
171
        **token_type_ids**: (`optional` need to be trained) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
thomwolf's avatar
thomwolf committed
172
173
174
175
176
177
            Optional segment token indices to indicate first and second portions of the inputs.
            This embedding matrix is not trained (not pretrained during RoBERTa pretraining), you will have to train it
            during finetuning.
            Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
            corresponds to a `sentence B` token
            (see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
thomwolf's avatar
thomwolf committed
178
        **position_ids**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
thomwolf's avatar
thomwolf committed
179
180
            Indices of positions of each input sequence tokens in the position embeddings.
            Selected in the range ``[0, config.max_position_embeddings - 1]``.
thomwolf's avatar
thomwolf committed
181
        **head_mask**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
thomwolf's avatar
thomwolf committed
182
183
184
185
186
187
188
189
190
191
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            ``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
"""

@add_start_docstrings("The bare RoBERTa Model transformer outputing raw hidden-states without any specific head on top.",
                      ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)
class TFRobertaModel(TFRobertaPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **last_hidden_state**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, hidden_size)``
            Sequence of hidden-states at the output of the last layer of the model.
        **pooler_output**: ``tf.Tensor`` of shape ``(batch_size, hidden_size)``
            Last layer hidden-state of the first token of the sequence (classification token)
            further processed by a Linear layer and a Tanh activation function. The Linear
            layer weights are trained from the next sentence prediction (classification)
            objective during Bert pretraining. This output is usually *not* a good summary
            of the semantic content of the input, you're often better with averaging or pooling
            the sequence of hidden-states for the whole input sequence.
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from pytorch_transformers import RobertaTokenizer, TFRobertaModel

        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = TFRobertaModel.from_pretrained('roberta-base')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
        outputs = model(input_ids)
        last_hidden_states = outputs[0]  # The last hidden-state is the first element of the output tuple

    """
    def __init__(self, config, *inputs, **kwargs):
        super(TFRobertaModel, self).__init__(config, *inputs, **kwargs)
        # All transformer computation is delegated to the shared main layer.
        self.roberta = TFRobertaMainLayer(config, name='roberta')

    def call(self, inputs, **kwargs):
        # Thin pass-through: the main layer already returns the full output tuple.
        return self.roberta(inputs, **kwargs)


class TFRobertaLMHead(tf.keras.layers.Layer):
    """Roberta Head for masked language modeling."""
    def __init__(self, config, input_embeddings, **kwargs):
        super(TFRobertaLMHead, self).__init__(**kwargs)
        self.vocab_size = config.vocab_size
        self.dense = tf.keras.layers.Dense(
            config.hidden_size,
            kernel_initializer=get_initializer(config.initializer_range),
            name='dense')
        self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name='layer_norm')
        self.act = tf.keras.layers.Activation(gelu)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = input_embeddings

    def build(self, input_shape):
        # One trainable bias per vocabulary entry, created lazily at build time.
        self.bias = self.add_weight(shape=(self.vocab_size,),
                                    initializer='zeros',
                                    trainable=True,
                                    name='bias')
        super(TFRobertaLMHead, self).build(input_shape)

    def call(self, features):
        # Transform: dense projection -> GELU -> layer norm.
        hidden = self.layer_norm(self.act(self.dense(features)))

        # project back to size of vocabulary with bias
        return self.decoder(hidden, mode="linear") + self.bias


@add_start_docstrings("""RoBERTa Model with a `language modeling` head on top. """,
    ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)
class TFRobertaForMaskedLM(TFRobertaPreTrainedModel):
    r"""
        **masked_lm_labels**: (`optional`) ``Numpy array`` or ``tf.Tensor`` of shape ``(batch_size, sequence_length)``:
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-1, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring)
            Tokens with indices set to ``-1`` are ignored (masked), the loss is only computed for the tokens with labels
            in ``[0, ..., config.vocab_size]``

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned when ``masked_lm_labels`` is provided) ``tf.Tensor`` of shape ``(1,)``:
            Masked language modeling loss.
        **prediction_scores**: ``tf.Tensor`` of shape ``(batch_size, sequence_length, config.vocab_size)``
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from pytorch_transformers import RobertaTokenizer, TFRobertaForMaskedLM

        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = TFRobertaForMaskedLM.from_pretrained('roberta-base')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
        outputs = model(input_ids, masked_lm_labels=input_ids)
        prediction_scores = outputs[0]

    """
    def __init__(self, config, *inputs, **kwargs):
        super(TFRobertaForMaskedLM, self).__init__(config, *inputs, **kwargs)

        self.roberta = TFRobertaMainLayer(config, name="roberta")
        # LM head shares its decoder weights with the input embeddings (weight tying).
        self.lm_head = TFRobertaLMHead(config, self.roberta.embeddings, name="lm_head")

    def call(self, inputs, **kwargs):
        # Encode, then score every position against the vocabulary.
        transformer_outputs = self.roberta(inputs, **kwargs)
        prediction_scores = self.lm_head(transformer_outputs[0])

        # Drop the pooled output (index 1); keep hidden states / attentions if present.
        return (prediction_scores,) + transformer_outputs[2:]  # prediction_scores, (hidden_states), (attentions)


class TFRobertaClassificationHead(tf.keras.layers.Layer):
    """Head for sentence-level classification tasks.

    Takes the hidden states of a sequence, pools the <s> token representation,
    and projects it to ``config.num_labels`` logits.
    """

    def __init__(self, config, **kwargs):
        # Bug fix: the original passed `config` positionally to the Keras base
        # Layer, where it would be bound to the `trainable` parameter. Only the
        # regular layer kwargs (e.g. `name`) must be forwarded.
        super(TFRobertaClassificationHead, self).__init__(**kwargs)
        self.dense = tf.keras.layers.Dense(config.hidden_size,
                                           kernel_initializer=get_initializer(config.initializer_range),
                                           activation='tanh',
                                           name="dense")
        self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
        self.out_proj = tf.keras.layers.Dense(config.num_labels,
                                              kernel_initializer=get_initializer(config.initializer_range),
                                              name="out_proj")

    def call(self, features, training=False):
        # features: last-layer hidden states of shape (batch, seq_len, hidden).
        x = features[:, 0, :]  # take <s> token (equiv. to [CLS])
        x = self.dropout(x, training=training)
        x = self.dense(x)
        x = self.dropout(x, training=training)
        x = self.out_proj(x)
        return x


@add_start_docstrings("""RoBERTa Model transformer with a sequence classification/regression head on top (a linear layer 
    on top of the pooled output) e.g. for GLUE tasks. """,
    ROBERTA_START_DOCSTRING, ROBERTA_INPUTS_DOCSTRING)
class TFRobertaForSequenceClassification(TFRobertaPreTrainedModel):
    r"""
    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **logits**: ``tf.Tensor`` of shape ``(batch_size, config.num_labels)``
            Classification (or regression if config.num_labels==1) scores (before SoftMax).
        **hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
            list of ``tf.Tensor`` (one for the output of each layer + the output of the embeddings)
            of shape ``(batch_size, sequence_length, hidden_size)``:
            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        **attentions**: (`optional`, returned when ``config.output_attentions=True``)
            list of ``tf.Tensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.

    Examples::

        import tensorflow as tf
        from pytorch_transformers import RobertaTokenizer, TFRobertaForSequenceClassification

        tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
        model = TFRobertaForSequenceClassification.from_pretrained('roberta-base')
        input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :]  # Batch size 1
        labels = tf.constant([1])[None, :]  # Batch size 1
        outputs = model(input_ids)
        logits = outputs[0]

    """
    def __init__(self, config, *inputs, **kwargs):
        super(TFRobertaForSequenceClassification, self).__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels

        self.roberta = TFRobertaMainLayer(config, name="roberta")
        self.classifier = TFRobertaClassificationHead(config, name="classifier")

    def call(self, inputs, **kwargs):
        # Encode, then classify from the pooled <s> representation.
        transformer_outputs = self.roberta(inputs, **kwargs)
        sequence_output = transformer_outputs[0]
        # Forward the training flag so dropout behaves correctly in the head.
        logits = self.classifier(sequence_output, training=kwargs.get('training', False))

        # Drop the pooled output (index 1); keep hidden states / attentions if present.
        return (logits,) + transformer_outputs[2:]  # logits, (hidden_states), (attentions)