"tests/vscode:/vscode.git/clone" did not exist on "504db92e7da010070c36e185332420a1d52c12b2"
modeling_tf_utils.py 72.1 KB
Newer Older
thomwolf's avatar
thomwolf committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF general model utils."""

import functools
import inspect
import os
import re
import warnings
from typing import Dict, List, Optional, Union

import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.saving import hdf5_format

from .configuration_utils import PretrainedConfig
from .file_utils import (
    DUMMY_INPUTS,
    TF2_WEIGHTS_NAME,
    WEIGHTS_NAME,
    ModelOutput,
    cached_path,
    hf_bucket_url,
    is_offline_mode,
    is_remote_url,
)
from .generation_tf_utils import TFGenerationMixin
from .tokenization_utils_base import BatchEncoding
from .utils import logging


logger = logging.get_logger(__name__)
tf_logger = tf.get_logger()

TFModelInputType = Union[
    List[tf.Tensor], List[np.ndarray], Dict[str, tf.Tensor], Dict[str, np.ndarray], np.ndarray, tf.Tensor
]


class TFModelUtilsMixin:
    """
    A few utilities for :obj:`tf.keras.Model`, to be used as a mixin.
    """

    def num_parameters(self, only_trainable: bool = False) -> int:
        """
        Get the number of (optionally, trainable) parameters in the model.

        Args:
            only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return only the number of trainable parameters

        Returns:
            :obj:`int`: The number of parameters.
        """
        if only_trainable:
            return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables))
        else:
            return self.count_params()


def keras_serializable(cls):
    """
    Decorate a Keras Layer class to support Keras serialization.

    This is done by:

    1. Adding a :obj:`transformers_config` dict to the Keras config dictionary in :obj:`get_config` (called by Keras at
       serialization time).
    2. Wrapping :obj:`__init__` to accept that :obj:`transformers_config` dict (passed by Keras at deserialization
       time) and convert it to a config object for the actual layer initializer.
    3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not
       need to be supplied in :obj:`custom_objects` in the call to :obj:`tf.keras.models.load_model`.

    Args:
        cls (a :obj:`tf.keras.layers.Layers subclass`):
            Typically a :obj:`TF.MainLayer` class in this project, in general must accept a :obj:`config` argument to
            its initializer.

    Returns:
        The same class object, with modifications for Keras deserialization.
    """
    initializer = cls.__init__

    config_class = getattr(cls, "config_class", None)
    if config_class is None:
        raise AttributeError("Must set `config_class` to use @keras_serializable")

    @functools.wraps(initializer)
    def wrapped_init(self, *args, **kwargs):
        config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None)

        if isinstance(config, dict):
            config = config_class.from_dict(config)
            initializer(self, config, *args, **kwargs)
        elif isinstance(config, PretrainedConfig):
            if len(args) > 0:
                initializer(self, *args, **kwargs)
            else:
                initializer(self, config, *args, **kwargs)
        else:
            raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)")

        self._config = config
        self._kwargs = kwargs

    cls.__init__ = wrapped_init

    if not hasattr(cls, "get_config"):
        raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses")
    if hasattr(cls.get_config, "_is_default"):

        def get_config(self):
            cfg = super(cls, self).get_config()
            cfg["config"] = self._config.to_dict()
            cfg.update(self._kwargs)
            return cfg

        cls.get_config = get_config

    cls._keras_serializable = True
    if hasattr(tf.keras.utils, "register_keras_serializable"):
        cls = tf.keras.utils.register_keras_serializable()(cls)
    return cls
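
# Illustrative usage sketch (not part of the original file); `TFXxxMainLayer` and `XxxConfig`
# are placeholders for any main-layer/config pair whose __init__ accepts a `config` argument:
#
#     @keras_serializable
#     class TFXxxMainLayer(tf.keras.layers.Layer):
#         config_class = XxxConfig
#
#         def __init__(self, config, **kwargs):
#             super().__init__(**kwargs)
#             ...
#
# Once decorated, a Keras model built on top of the layer can be reloaded with
# `tf.keras.models.load_model(...)` without passing the layer through `custom_objects`.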


class TFCausalLanguageModelingLoss:
    """
    Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token.

    .. note::

        Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.

    """

    def compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        # make sure only labels that are not equal to -100 affect the loss
        active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
        reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
        labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
        return loss_fn(labels, reduced_logits)
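
    # Worked example (illustrative, not from the original file): with labels [[17, 92, -100]]
    # and logits of shape (1, 3, vocab_size), `active_loss` is [True, True, False], so the
    # position labelled -100 (e.g. padding) contributes neither a label nor a logit row to the
    # cross-entropy computed above.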


class TFQuestionAnsweringLoss:
    """
    Loss function suitable for question answering.
    """

    def compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        start_loss = loss_fn(labels["start_position"], logits[0])
        end_loss = loss_fn(labels["end_position"], logits[1])

        return (start_loss + end_loss) / 2.0


class TFTokenClassificationLoss:
    """
    Loss function suitable for token classification.

    .. note::

        Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.

    """

    def compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        # make sure only labels that are not equal to -100
        # are taken into account as loss
        if tf.math.reduce_any(labels == -1):
            warnings.warn("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.")
            active_loss = tf.reshape(labels, (-1,)) != -1
        else:
            active_loss = tf.reshape(labels, (-1,)) != -100
        reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
        labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)

        return loss_fn(labels, reduced_logits)


class TFSequenceClassificationLoss:
    """
    Loss function suitable for sequence classification.
    """

    def compute_loss(self, labels, logits):
        if len(shape_list(logits)) == 1 or shape_list(logits)[1] == 1:
            loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
        else:
            loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
                from_logits=True, reduction=tf.keras.losses.Reduction.NONE
            )

        return loss_fn(labels, logits)


class TFMultipleChoiceLoss(TFSequenceClassificationLoss):
    """Loss function suitable for multiple choice tasks."""


class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss):
    """
    Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens.

    .. note::

         Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
    """


class TFNextSentencePredictionLoss:
    """
    Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence.

    .. note::
         Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
    """

    def compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        # make sure only labels that are not equal to -100
        # are taken into account as loss
        next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
        next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss)
        next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss)

        return loss_fn(next_sentence_label, next_sentence_reduced_logits)


def booleans_processing(config, **kwargs):
    """
    Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or
    graph)

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config of the running model.
        **kwargs:
            The boolean parameters

    Returns:
        A dictionary with the proper values for each boolean
    """
    final_booleans = {}

    if tf.executing_eagerly():
        final_booleans["output_attentions"] = (
            kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
        )
        final_booleans["output_hidden_states"] = (
            kwargs["output_hidden_states"]
            if kwargs["output_hidden_states"] is not None
            else config.output_hidden_states
        )
        final_booleans["return_dict"] = (
            kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
        )

        if "use_cache" in kwargs:
            final_booleans["use_cache"] = kwargs["use_cache"] if kwargs["use_cache"] is not None else config.use_cache
    else:
        if (
            kwargs["output_attentions"] is not None
            or kwargs["output_hidden_states"] is not None
            or ("use_cache" in kwargs and kwargs["use_cache"] is not None)
        ):
            tf_logger.warn(
                "The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model. "
                "They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)."
            )

        final_booleans["output_attentions"] = config.output_attentions
        final_booleans["output_hidden_states"] = config.output_hidden_states

        if kwargs["return_dict"] is not None:
            tf_logger.warn("The parameter `return_dict` cannot be set in graph mode and will always be set to `True`.")
        final_booleans["return_dict"] = True

        if "use_cache" in kwargs:
            final_booleans["use_cache"] = config.use_cache

    return final_booleans
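
# Illustrative note (not part of the original file): in eager mode the call-time values win,
# e.g. booleans_processing(config, output_attentions=True, output_hidden_states=None,
# return_dict=None) yields {"output_attentions": True} plus the config defaults for the other
# keys; in graph mode the same call warns and falls back to the values stored in `config`,
# with "return_dict" forced to True.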


def input_processing(func, config, input_ids, **kwargs):
    """
    Process the input of each TensorFlow model including the booleans. In case of a list of symbolic inputs, each input
    has to be named according to the parameter names, i.e. `input_ids = tf.keras.Input(shape=(128,), dtype='int32',
    name="input_ids")`, otherwise the order of the tensors will not be guaranteed during training.

    Args:
        func (:obj:`callable`):
            The callable function of the TensorFlow model.
        config (:class:`~transformers.PretrainedConfig`):
            The config of the running model.
        **kwargs:
            The inputs of the model.

    Returns:
        A dictionary mapping each parameter name to its processed input value.
    """
    signature = dict(inspect.signature(func).parameters)
    signature.pop("kwargs", None)
    signature.pop("self", None)
    parameter_names = list(signature.keys())
    output = {}
    allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray)

    if "inputs" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
            FutureWarning,
        )

        output["input_ids"] = kwargs["kwargs_call"].pop("inputs")

    if "decoder_cached_states" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
            FutureWarning,
        )
        output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")

    if len(kwargs["kwargs_call"]) > 0:
        raise ValueError(
            f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}."
        )

    kwargs.pop("kwargs_call")

    for k, v in kwargs.items():
        if isinstance(v, allowed_types) or v is None:
            output[k] = v
        else:
            raise ValueError(f"Data of type {type(v)} is not allowed. Only {allowed_types} are accepted for {k}.")

    if isinstance(input_ids, (tuple, list)):
        for i, input in enumerate(input_ids):
            # EagerTensors don't allow using the .name property, so we check for a real Tensor
            if type(input) == tf.Tensor:
                # Tensor names always have the pattern `name:id`, so we check only the
                # `name` part
                tensor_name = input.name.split(":")[0]

                if tensor_name in parameter_names:
                    output[tensor_name] = input
                else:
                    output[parameter_names[i]] = input
            elif isinstance(input, allowed_types) or input is None:
                output[parameter_names[i]] = input
            else:
                raise ValueError(
                    f"Data of type {type(input)} is not allowed. Only {allowed_types} are accepted for {parameter_names[i]}."
                )
    elif isinstance(input_ids, (dict, BatchEncoding)):
        if "inputs" in input_ids:
            warnings.warn(
                "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
                FutureWarning,
            )

            output["input_ids"] = input_ids.pop("inputs")

        if "decoder_cached_states" in input_ids:
            warnings.warn(
                "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            output["past_key_values"] = input_ids.pop("decoder_cached_states")

        for k, v in dict(input_ids).items():
            if isinstance(v, allowed_types) or v is None:
                output[k] = v
            elif k not in parameter_names and "args" not in parameter_names:
                logger.warn(
                    f"The parameter {k} does not belong to the parameter list {parameter_names} and will be ignored."
                )
                continue
            else:
                raise ValueError(f"Data of type {type(v)} is not allowed. Only {allowed_types} are accepted for {k}.")
    else:
        if isinstance(input_ids, tf.Tensor) or input_ids is None:
            output[parameter_names[0]] = input_ids
        else:
            raise ValueError(
                f"Data of type {type(input_ids)} is not allowed. Only {allowed_types} are accepted for {parameter_names[0]}."
            )

    for name in parameter_names:
        if name not in list(output.keys()) and name != "args":
            output[name] = kwargs.pop(name, signature[name].default)

    # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
    # So to respect the proper output we have to add this exception
    if "args" in output:
        if output["args"] is not None and type(output["args"]) == tf.Tensor:
            tensor_name = output["args"].name.split(":")[0]
            output[tensor_name] = output["args"]
        else:
            # `args` in this case is always the first parameter, then `input_ids`
            output["input_ids"] = output["args"]

        del output["args"]

    if "kwargs" in output:
        del output["kwargs"]

    boolean_dict = {
        k: v
        for k, v in output.items()
        if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
    }

    output.update(
        booleans_processing(
            config=config,
            **boolean_dict,
        )
    )

    return output
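
# Illustrative sketch (not part of the original file): when feeding a list of symbolic inputs,
# naming each tf.keras.Input after the corresponding call parameter keeps the mapping
# unambiguous for input_processing:
#
#     input_ids = tf.keras.Input(shape=(128,), dtype="int32", name="input_ids")
#     attention_mask = tf.keras.Input(shape=(128,), dtype="int32", name="attention_mask")
#     outputs = model([input_ids, attention_mask])
#
# With unnamed tensors, list entries are assigned to parameters purely by position.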


def load_tf_weights(model, resolved_archive_file, _prefix=None):
    """
    Detect missing and unexpected layers and load the TF weights according to their names and shapes.

    Args:
        model (:obj:`tf.keras.models.Model`):
            The model to load the weights into.
        resolved_archive_file (:obj:`str`):
            The location of the H5 file.

    Returns:
        Two lists, one for the missing layers, and another one for the unexpected layers.
    """
    missing_layers = []
    unexpected_layers = []

    # Read the H5 file
    with h5py.File(resolved_archive_file, "r") as f:
        # Retrieve the name of each layer from the H5 file
        saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))

        # Find the missing layers from the high level list of layers
        missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)

        # Find the unexpected layers from the high level list of layers
        unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))
        saved_weight_names_set = set()
        symbolic_weights_names = set()
        weight_value_tuples = []

        # Compute missing and unexpected sub layers
        # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
        for layer in model.layers:
            # if layer_name from the H5 file belongs to the layers from the instantiated model
            if layer.name in saved_h5_model_layers_name:
                # Get the H5 layer object from its name
                h5_layer_object = f[layer.name]
                # Get all the weights as a list from the layer object
                symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
                saved_weights = {}

                # Create a dict from the H5 saved model that looks like {"weight_name": weight_value}
                # And a set with only the names
                for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"):
                    # TF names always start with the model name so we ignore it
                    name = "/".join(weight_name.split("/")[1:])

                    if _prefix is not None:
                        name = _prefix + "/" + name

                    saved_weights[name] = np.asarray(h5_layer_object[weight_name])

                    # Add the updated name to the final list for computing missing/unexpected values
                    saved_weight_names_set.add(name)

                # Loop over each weight from the instantiated model and compare with the weights from the H5 file
                for symbolic_weight in symbolic_weights:
                    # TF names always start with the model name so we ignore it
                    if _prefix is not None:
                        delimiter = len(_prefix.split("/"))
                        symbolic_weight_name = "/".join(
                            symbolic_weight.name.split("/")[:delimiter]
                            + symbolic_weight.name.split("/")[delimiter + 1 :]
                        )
                    else:
                        symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:])

                    # here we check if the current weight is among the weights from the H5 file
                    # If yes, get the weight_value of the corresponding weight from the H5 file
                    # If not, set the value to None
                    saved_weight_value = saved_weights.get(symbolic_weight_name, None)

                    # Add the updated name to the final list for computing missing/unexpected values
                    symbolic_weights_names.add(symbolic_weight_name)

                    # If the current weight is found
                    if saved_weight_value is not None:
                        # Check if the shape of the current weight and the one from the H5 file are different
                        if K.int_shape(symbolic_weight) != saved_weight_value.shape:
                            # If yes we reshape the weight from the H5 file according to the current weight
                            # If the two shapes are not compatible we raise an issue
                            try:
                                array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
                            except AssertionError as e:
                                e.args += (K.int_shape(symbolic_weight), saved_weight_value.shape)
                                raise e
                        else:
                            array = saved_weight_value

                        # We create the tuple that will be loaded and add it to the final list
                        weight_value_tuples.append((symbolic_weight, array))

    # Load all the weights
    K.batch_set_value(weight_value_tuples)

    # Compute the missing and unexpected layers
    missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))
    unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))

    return missing_layers, unexpected_layers
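
# Usage sketch (illustrative, not part of the original file): given a resolved H5 checkpoint
# path, the helper reports which layers could not be matched in either direction, e.g.
#
#     missing, unexpected = load_tf_weights(model, "tf_model.h5")
#     if missing:
#         logger.warning(f"Layers not initialized from the checkpoint: {missing}")
#     if unexpected:
#         logger.warning(f"Checkpoint layers not used by the model: {unexpected}")
#
# where "tf_model.h5" stands for any file saved under TF2_WEIGHTS_NAME.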


def init_copy_embeddings(old_embeddings, new_num_tokens):
    r"""
    This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case
    new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be
    kept or not. Example:

        - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4]

            -  mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1]
        - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5]

            - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4]
    """
    old_num_tokens, old_embedding_dim = shape_list(old_embeddings)
    size_diff = new_num_tokens - old_num_tokens

    # initialize new embeddings
    # Copy token embeddings from the previous ones
    if tf.math.greater(size_diff, 0):
        # if the new size is greater than the old one, we pad the current embeddings until they reach the new size,
        # and we create a mask to properly identify the padded values so they can be replaced by the values of the
        # newly created embeddings
        current_weights = tf.pad(
            old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1
        )
        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
        mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True)
        mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False)
    else:
        # if the new size is lower than the old one, we take the current embeddings until the new size
        current_weights = tf.slice(
            old_embeddings.value(),
            tf.convert_to_tensor([0, 0]),
            tf.convert_to_tensor([new_num_tokens, old_embedding_dim]),
        )
        mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True)

    return mask, current_weights
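
# Worked example (illustrative, not from the original file): growing a 3-token embedding
# matrix to 5 tokens,
#
#     old = tf.Variable([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]])
#     mask, weights = init_copy_embeddings(old, 5)
#
# yields `weights` of shape (5, 2) whose last two rows are filled with -1 and a (5, 1) `mask`
# of [True, True, True, False, False]; the False rows are later overwritten with freshly
# initialized values in `_get_resized_embeddings`.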


class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin):
    r"""
    Base class for all TF models.

    :class:`~transformers.TFPreTrainedModel` takes care of storing the configuration of the models and handles methods
    for loading, downloading and saving models as well as a few methods common to all models to:

        * resize the input embeddings,
        * prune heads in the self-attention heads.

    Class attributes (overridden by derived classes):

        - **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
          :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
        - **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
          derived classes of the same architecture adding modules on top of the base model.
    """
    config_class = None
    base_model_prefix = ""
    # a list of re pattern of tensor names to ignore from the model when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_missing = None
    # a list of re pattern of tensor names to ignore from the weights when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_unexpected = None
    _requires_load_weight_prefix = False

    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """
        Dummy inputs to build the network.

        Returns:
            :obj:`Dict[str, tf.Tensor]`: The dummy inputs.
        """
        return {
            "input_ids": tf.constant(DUMMY_INPUTS),
        }

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
                "`PretrainedConfig`. To create a model from a pretrained model use "
                f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # Save config and origin of the pretrained weights if given in model
        self.config = config
        self.name_or_path = config.name_or_path

    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
                "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
            }
        ]
    )
    def serving(self, inputs):
        """
        Method used for serving the model.

        Args:
            inputs (:obj:`Dict[str, tf.Tensor]`):
                The input of the saved model as a dictionary of tensors.
        """
        output = self.call(inputs)

        return self.serving_output(output)

    def serving_output(self, output):
        """
        Prepare the output of the saved model. Each model must implement this function.

        Args:
            output (:obj:`~transformers.TFBaseModelOutput`):
                The output returned by the model.
        """
        raise NotImplementedError
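
    # Illustrative sketch (not part of the original file): a concrete model typically
    # overrides `serving_output` to convert dynamic fields of the output into static tensors
    # suitable for a SavedModel signature, along the lines of:
    #
    #     def serving_output(self, output):
    #         hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
    #         attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
    #         return TFBaseModelOutput(
    #             last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns
    #         )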

    def get_input_embeddings(self) -> tf.keras.layers.Layer:
        """
        Returns the model's input embeddings layer.

        Returns:
            :obj:`tf.Variable`: The embeddings layer mapping vocabulary to hidden states.
        """
        main_layer = getattr(self, self.base_model_prefix, self)

        if main_layer is not self:
            return main_layer.get_input_embeddings()
        else:
            raise NotImplementedError

    def set_input_embeddings(self, value):
        """
        Set model's input embeddings

        Args:
            value (:obj:`tf.Variable`):
                The new weights mapping hidden states to vocabulary.
        """
        main_layer = getattr(self, self.base_model_prefix)

        if main_layer is None:
            raise NotImplementedError("The model does not implement the base_model_prefix attribute.")

        try:
            main_layer.set_input_embeddings(value)
        except AttributeError:
            logger.info("Building the model")
            self(self.dummy_inputs)
            main_layer.set_input_embeddings(value)

    def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]:
        """
        Returns the model's output embeddings

        Returns:
            :obj:`tf.Variable`: The new weights mapping vocabulary to hidden states.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()

            return lm_head.get_output_embeddings()

        return None  # Overwrite for models with output embeddings

    def set_output_embeddings(self, value):
        """
        Set model's output embeddings

        Args:
            value (:obj:`tf.Variable`):
                The new weights mapping hidden states to vocabulary.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()
            try:
                lm_head.set_output_embeddings(value)
            except AttributeError:
                logger.info("Building the model")
                self(self.dummy_inputs)
                lm_head.set_output_embeddings(value)

    def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:
        """
        Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the
        embeddings

        Return:
            :obj:`tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model.
        """
        warnings.warn(
            "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning
        )
        return self.get_lm_head()

    def get_prefix_bias_name(self) -> Union[None, str]:
        """
        Get the concatenated _prefix name of the bias from the model name to the parent layer

        Return:
            :obj:`str`: The _prefix name of the bias.
        """
        warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
        return None

    def get_bias(self) -> Union[None, Dict[str, tf.Variable]]:
        """
        Dict of bias attached to an LM head. The key represents the name of the bias attribute.

        Return:
            :obj:`tf.Variable`: The weights representing the bias, None if not an LM model.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()
            try:
                return lm_head.get_bias()
            except AttributeError:
                self(self.dummy_inputs)

                return lm_head.get_bias()
        return None

    def set_bias(self, value):
        """
        Set all the bias in the LM head.

        Args:
            value (:obj:`Dict[tf.Variable]`):
                All the new bias attached to an LM head.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()
            try:
                lm_head.set_bias(value)
            except AttributeError:
                self(self.dummy_inputs)
                lm_head.set_bias(value)

    def get_lm_head(self) -> tf.keras.layers.Layer:
        """
        The LM Head layer. This method must be overwritten by all the models that have an LM head.

        Return:
            :obj:`tf.keras.layers.Layer`: The LM head layer if the model has one, None if not.
        """
        return None

    def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable:
        """
        Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.

        Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.

        Arguments:
            new_num_tokens (:obj:`int`, `optional`):
                The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
                vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
                just returns a pointer to the input tokens :obj:`tf.Variable` module of the model without doing
                anything.

        Return:
            :obj:`tf.Variable`: Pointer to the input tokens Embeddings Module of the model.
        """
        if new_num_tokens is None or new_num_tokens == self.config.vocab_size:
            return self._get_word_embedding_weight(self.get_input_embeddings())

        model_embeds = self._resize_token_embeddings(new_num_tokens)

        # Update base model and current model config
        self.config.vocab_size = new_num_tokens

        return model_embeds
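
    # Usage sketch (illustrative, not part of the original file): after adding tokens to a
    # tokenizer, the embedding matrix is typically resized to match:
    #
    #     tokenizer.add_tokens(["<new_token>"])
    #     model.resize_token_embeddings(len(tokenizer))
    #
    # where `tokenizer` and `model` are a matching pretrained tokenizer/model pair.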

    def _get_word_embedding_weight(model, embedding_layer):
        embeds = getattr(embedding_layer, "weight", None)
        if embeds is not None:
            return embeds

        embeds = getattr(embedding_layer, "decoder", None)
        if embeds is not None:
            return embeds

        # The reason why the attributes don't exist might be
        # because the model is not built, so retry getting
        # the argument after building the model
        model(model.dummy_inputs)

        embeds = getattr(embedding_layer, "weight", None)
        if embeds is not None:
            return embeds

        embeds = getattr(embedding_layer, "decoder", None)
        if embeds is not None:
            return embeds

        return None

    def _resize_token_embeddings(self, new_num_tokens):
        old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings())
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)

        # if word embeddings are not tied, make sure that lm head bias is resized as well
        if self.get_bias() is not None:
            old_lm_head_bias = self.get_bias()
            new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens)

            self.set_bias(new_lm_head_bias)

        # if word embeddings are not tied, make sure that lm head decoder is resized as well
        if self.get_output_embeddings() is not None:
            old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings())
            new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens)

            self.set_output_embeddings(new_lm_head_decoder)

        self.set_input_embeddings(new_embeddings)

        return self.get_input_embeddings()

    def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens):
        """
        Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end.
        Reducing the size will remove vectors from the end

        Args:
            old_lm_head_bias (:obj:`tf.Variable`):
                Old lm head bias to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns None

        Return:
            :obj:`tf.Variable`: Pointer to the resized bias.
        """
        new_lm_head_bias = {}

        for attr, weight in old_lm_head_bias.items():
            first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)
            size_diff = new_num_tokens - old_num_tokens
            final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens]

            # initialize new bias
            if tf.math.greater(size_diff, 0):
                padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]
                current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1)
                num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
                mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy]
                bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True)
                bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False)
            else:
                slice_from = [0] if first_dim is None else [0, 0]
                current_bias = tf.slice(
                    weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape)
                )
                bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True)

            new_bias = self.add_weight(
                shape=final_shape,
                initializer="zeros",
                trainable=True,
                name=weight.name.split(":")[0],
            )
            init_bias = tf.where(bias_mask, current_bias, new_bias.value())

            new_bias.assign(init_bias)
            new_lm_head_bias[attr] = new_bias

        return new_lm_head_bias

    def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens):
        """
        Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end.
        Reducing the size will remove vectors from the end

        Args:
            old_lm_head_decoder (:obj:`tf.Variable`):
                Old lm head decoder to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns None

        Return:
            :obj:`tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the
            input ones.
        """
        new_lm_head_decoder = old_lm_head_decoder
        is_input_output_equals = tf.reduce_any(
            self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder
        )

        if old_lm_head_decoder is not None and not is_input_output_equals:
            old_embedding_dim = shape_list(old_lm_head_decoder)[1]
            decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens)
            new_lm_head_decoder = self.add_weight(
                shape=(new_num_tokens, old_embedding_dim),
                initializer="zeros",
                trainable=True,
                name=old_lm_head_decoder.name.split(":")[0],
            )
            init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value())

            new_lm_head_decoder.assign(init_decoder)

        return new_lm_head_decoder

    def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:
        """
        Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly
        initialized vectors at the end. Reducing the size will remove vectors from the end

        Args:
            old_embeddings (:obj:`tf.Variable`):
                Old embeddings to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the embedding matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`tf.Variable` module of the model without doing anything.

        Return:
            :obj:`tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if
            :obj:`new_num_tokens` is :obj:`None`
        """
        old_embedding_dim = shape_list(old_embeddings)[1]
        init_range = getattr(self.config, "initializer_range", 0.02)
        embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)
        new_embeddings = self.add_weight(
            name=old_embeddings.name.split(":")[0],
            shape=[new_num_tokens, old_embedding_dim],
            initializer=get_initializer(init_range),
            dtype=tf.float32,
        )
        init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())

        new_embeddings.assign(init_embeddings)

        return new_embeddings

    def prune_heads(self, heads_to_prune):
        """
        Prunes heads of the base model.

        Arguments:
            heads_to_prune (:obj:`Dict[int, List[int]]`):
                Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
                heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
                0 and 2 on layer 1 and heads 2 and 3 on layer 2.
        """
        raise NotImplementedError

    def save_pretrained(self, save_directory, saved_model=False, version=1):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        :func:`~transformers.TFPreTrainedModel.from_pretrained` class method.

        Arguments:
            save_directory (:obj:`str`):
                Directory to which to save. Will be created if it doesn't exist.
            saved_model (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the model should also be saved in the SavedModel format.
            version (:obj:`int`, `optional`, defaults to 1):
                The version of the saved model. A saved model needs to be versioned in order to be properly loaded by
                TensorFlow Serving as detailed in the official documentation
                https://www.tensorflow.org/tfx/serving/serving_basic
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)

        if saved_model:
            saved_model_dir = os.path.join(save_directory, "saved_model", str(version))
            self.save(saved_model_dir, include_optimizer=False, signatures=self.serving)
            logger.info(f"Saved model created in {saved_model_dir}")

        # Save configuration file
        self.config.save_pretrained(save_directory)

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME)
        self.save_weights(output_model_file)
        logger.info(f"Model weights saved in {output_model_file}")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
1047
1048
        r"""
        Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.
thomwolf's avatar
thomwolf committed
1049

1050
1051
1052
        The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.
thomwolf's avatar
thomwolf committed
1053

1054
1055
        The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
        weights are discarded.
thomwolf's avatar
thomwolf committed
1056
1057

        Parameters:
1058
1059
1060
            pretrained_model_name_or_path (:obj:`str`, `optional`):
                Can be either:

1061
1062
1063
                    - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
                      a user or organization name, like ``dbmdz/bert-base-german-cased``.
1064
                    - A path to a `directory` containing model weights saved using
Ratthachat (Jung)'s avatar
Ratthachat (Jung) committed
1065
                      :func:`~transformers.TFPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
Sylvain Gugger's avatar
Sylvain Gugger committed
1066
                    - A path or url to a `PyTorch state_dict save file` (e.g, ``./pt_model/pytorch_model.bin``). In
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
                      this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided
                      as ``config`` argument. This loading path is slower than converting the PyTorch model in a
                      TensorFlow model using the provided conversion scripts and loading the TensorFlow model
                      afterwards.
                    - :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
                      arguments ``config`` and ``state_dict``).
            model_args (sequence of positional arguments, `optional`):
                All remaning positional arguments will be passed to the underlying model's ``__init__`` method.
            config (:obj:`Union[PretrainedConfig, str]`, `optional`):
                Can be either:

                    - an instance of a class derived from :class:`~transformers.PretrainedConfig`,
                    - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.

                Configuration for the model to use instead of an automatically loaded configuation. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the `model id` string of a pretrained
                      model).
                    - The model was saved using :func:`~transformers.TFPreTrainedModel.save_pretrained` and is reloaded
                      by supplying the save directory.
                    - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
                      configuration JSON file named `config.json` is found in the directory.
            from_pt (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Load the model weights from a PyTorch state_dict save file (see docstring of
                ``pretrained_model_name_or_path`` argument).
            cache_dir (:obj:`str`, `optional`):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
            use_auth_token (:obj:`str` or `bool`, `optional`):
                The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
                generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
            revision (:obj:`str`, `optional`, defaults to :obj:`"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
                identifier allowed by git.
            mirror (:obj:`str`, `optional`):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
                Please refer to the mirror site for more information.
            kwargs (remaining dictionary of keyword arguments, `optional`):
                Can be used to update the configuration object (after it is loaded) and initialize the model (e.g.,
                :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
                automatically loaded:

                    - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
                      underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
                      initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
                      ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
                      with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
                      attribute will be passed to the underlying model's ``__init__`` function.
        .. note::

            Passing :obj:`use_auth_token=True` is required when you want to use a private model.

        Examples::

            >>> from transformers import BertConfig, TFBertModel
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = TFBertModel.from_pretrained('bert-base-uncased')
            >>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
            >>> model = TFBertModel.from_pretrained('./test/saved_model/')
            >>> # Update configuration during loading.
            >>> model = TFBertModel.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> assert model.config.output_attentions == True
            >>> # Loading from a PyTorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).
            >>> config = BertConfig.from_json_file('./pt_model/my_pt_model_config.json')
            >>> model = TFBertModel.from_pretrained('./pt_model/my_pytorch_model.bin', from_pt=True, config=config)
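            >>> # Also return a dict of loading diagnostics (missing/unexpected layer keys) -- illustrative.
            >>> model, loading_info = TFBertModel.from_pretrained('bert-base-uncased', output_loading_info=True)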

        """
        config = kwargs.pop("config", None)
        cache_dir = kwargs.pop("cache_dir", None)
        from_pt = kwargs.pop("from_pt", False)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        output_loading_info = kwargs.pop("output_loading_info", False)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        mirror = kwargs.pop("mirror", None)
        load_weight_prefix = kwargs.pop("load_weight_prefix", None)
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)

        user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                *model_args,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                _from_auto=from_auto_class,
                _from_pipeline=from_pipeline,
                **kwargs,
            )
        else:
            model_kwargs = kwargs

        # Load model
        if pretrained_model_name_or_path is not None:
            if os.path.isdir(pretrained_model_name_or_path):
                if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                    # Load from a PyTorch checkpoint in priority if from_pt
                    archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
                    # Load from a TF 2.0 checkpoint
                    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
                else:
                    raise EnvironmentError(
                        f"Error: no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME]} found in directory "
                        f"{pretrained_model_name_or_path}, or `from_pt` set to False"
                    )
            elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
                archive_file = pretrained_model_name_or_path
            elif os.path.isfile(pretrained_model_name_or_path + ".index"):
                archive_file = pretrained_model_name_or_path + ".index"
            else:
                archive_file = hf_bucket_url(
                    pretrained_model_name_or_path,
                    filename=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME),
                    revision=revision,
                    mirror=mirror,
                )

            try:
                # Load from URL or cache if already cached
                resolved_archive_file = cached_path(
                    archive_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                )
            except EnvironmentError as err:
                logger.error(err)
                msg = (
                    f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
                    f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
                    f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n"
                )
                raise EnvironmentError(msg)
            if resolved_archive_file == archive_file:
                logger.info(f"loading weights file {archive_file}")
            else:
                logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
        else:
            resolved_archive_file = None

        config.name_or_path = pretrained_model_name_or_path

        # composed models, *e.g.* TFRag, require special treatment when it comes to loading
        # pre-trained weights.
        if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None:
            model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name")

        # Instantiate model.
        model = cls(config, *model_args, **model_kwargs)

        if from_pt:
            from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model

            # Load from a PyTorch checkpoint
            return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)

        # we might need to extend the variable scope for composite models
        if load_weight_prefix is not None:
            with tf.compat.v1.variable_scope(load_weight_prefix):
                model(model.dummy_inputs)  # build the network with dummy inputs
        else:
            model(model.dummy_inputs)  # build the network with dummy inputs

        assert os.path.isfile(resolved_archive_file), f"Error retrieving file {resolved_archive_file}"
        # 'by_name' allows us to do transfer learning by skipping/adding layers
        # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357
        try:
            missing_keys, unexpected_keys = load_tf_weights(model, resolved_archive_file, load_weight_prefix)
        except OSError:
            raise OSError(
                "Unable to load weights from h5 file. "
                "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
            )

        model(model.dummy_inputs)  # Make sure restore ops are run

        if cls._keys_to_ignore_on_load_missing is not None:
            for pat in cls._keys_to_ignore_on_load_missing:
                missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

        if cls._keys_to_ignore_on_load_unexpected is not None:
            for pat in cls._keys_to_ignore_on_load_unexpected:
                unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

        if len(unexpected_keys) > 0:
            logger.warning(
                f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when "
                f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
                f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
                f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
                f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
                f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
            )
        else:
            logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")

        if len(missing_keys) > 0:
            logger.warning(
                f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized: {missing_keys}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        else:
            logger.warning(
                f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
                f"If your task is similar to the task the model of the checkpoint was trained on, "
                f"you can already use {model.__class__.__name__} for predictions without further training."
            )

        if output_loading_info:
            loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys}

            return model, loading_info

        return model


class TFConv1D(tf.keras.layers.Layer):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (:obj:`int`):
            The number of output features.
        nx (:obj:`int`):
            The number of input features.
        initializer_range (:obj:`float`, `optional`, defaults to 0.02):
            The standard deviation to use to initialize the weights.
        kwargs:
            Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
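
    Example (an illustrative sketch; the GPT-2-style sizes are assumptions)::

        >>> import tensorflow as tf
        >>> layer = TFConv1D(nf=3072, nx=768)  # acts like a dense layer with transposed weights
        >>> x = tf.ones((2, 5, 768))           # [batch, sequence, features]
        >>> layer(x).shape
        TensorShape([2, 5, 3072])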
    """

    def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.nf = nf
        self.nx = nx
        self.initializer_range = initializer_range

    def build(self, input_shape):
        self.weight = self.add_weight(
            "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
        )
        self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())
    def call(self, x):
        bz, sl = shape_list(x)[:2]

        x = tf.reshape(x, [-1, self.nx])
        x = tf.matmul(x, self.weight) + self.bias

        x = tf.reshape(x, [bz, sl, self.nf])

        return x


class TFSharedEmbeddings(tf.keras.layers.Layer):
    r"""
    Construct shared token embeddings.

    The weights of the embedding layer are usually shared with the weights of the linear decoder when doing language
    modeling.

    Args:
        vocab_size (:obj:`int`):
            The size of the vocabulary, e.g., the number of unique tokens.
        hidden_size (:obj:`int`):
            The size of the embedding vectors.
        initializer_range (:obj:`float`, `optional`):
            The standard deviation to use when initializing the weights. If no value is provided, it will default to
            :math:`1/\sqrt{hidden\_size}`.
        kwargs:
            Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
    """

    def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range

    def build(self, input_shape):
        """
        Build shared token embedding layer. Shared weights logic adapted from
        https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
        """
        self.weight = self.add_weight(
            "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)
        )
        super().build(input_shape)

    def get_config(self):
        config = {
            "vocab_size": self.vocab_size,
            "hidden_size": self.hidden_size,
            "initializer_range": self.initializer_range,
        }
        base_config = super().get_config()

        return dict(list(base_config.items()) + list(config.items()))

    def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor:
        """
        Get token embeddings of inputs or decode final hidden state.

        Args:
            inputs (:obj:`tf.Tensor`):
                In embedding mode, should be an int64 tensor with shape :obj:`[batch_size, length]`.

                In linear mode, should be a float tensor with shape :obj:`[batch_size, length, hidden_size]`.
            mode (:obj:`str`, defaults to :obj:`"embedding"`):
                A valid value is either :obj:`"embedding"` or :obj:`"linear"`, the first one indicates that the layer
                should be used as an embedding layer, the second one that the layer should be used as a linear decoder.

        Returns:
            :obj:`tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape
            :obj:`[batch_size, length, embedding_size]`.

            In linear mode, the output is a float32 tensor with shape :obj:`[batch_size, length, vocab_size]`.

        Raises:
            ValueError: if :obj:`mode` is not valid.

        Shared weights logic is adapted from `here
        <https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24>`__.
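
        Example (a minimal illustrative sketch; the sizes are arbitrary assumptions)::

            >>> import tensorflow as tf
            >>> emb = TFSharedEmbeddings(vocab_size=100, hidden_size=16)
            >>> hidden = emb(tf.constant([[1, 2, 3]]), mode="embedding")  # shape [1, 3, 16]
            >>> logits = emb(hidden, mode="linear")                       # shape [1, 3, 100]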
        """
        if mode == "embedding":
            return self._embedding(inputs)
        elif mode == "linear":
            return self._linear(inputs)
        else:
            raise ValueError(f"mode {mode} is not valid.")

    def _embedding(self, input_ids):
        """Applies embedding based on inputs tensor."""
        return tf.gather(self.weight, input_ids)

    def _linear(self, inputs):
        """
        Computes logits by running inputs through a linear layer.

        Args:
            inputs: A float32 tensor with shape [..., hidden_size]

        Returns:
            float32 tensor with shape [..., vocab_size].
        """
        first_dims = shape_list(inputs)[:-1]
        x = tf.reshape(inputs, [-1, self.hidden_size])
        logits = tf.matmul(x, self.weight, transpose_b=True)

        return tf.reshape(logits, first_dims + [self.vocab_size])


class TFSequenceSummary(tf.keras.layers.Layer):
    """
    Compute a single vector summary of a sequence's hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
            config class of your model for the default values it uses):

            - **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:

                - :obj:`"last"` -- Take the last token hidden state (like XLNet)
                - :obj:`"first"` -- Take the first token hidden state (like Bert)
                - :obj:`"mean"` -- Take the mean of all tokens hidden states
                - :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
                - :obj:`"attn"` -- Not implemented now, use multi-head attention

            - **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
            - **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
              :obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
            - **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
              output, another string or :obj:`None` will add no activation.
            - **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
              activation.
            - **summary_last_dropout** (:obj:`float`) -- Optional dropout probability after the projection and
              activation.

        initializer_range (:obj:`float`, defaults to 0.02): The standard deviation to use to initialize the weights.
        kwargs:
            Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
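
    Example (a minimal sketch using a stand-in config object; real models pass their own
    :class:`~transformers.PretrainedConfig`)::

        >>> import tensorflow as tf
        >>> from types import SimpleNamespace
        >>> config = SimpleNamespace(summary_type="mean", summary_use_proj=False)
        >>> summary = TFSequenceSummary(config)
        >>> hidden_states = tf.ones((2, 7, 768))  # [batch, sequence, hidden]
        >>> summary(hidden_states).shape          # mean over the sequence dimension
        TensorShape([2, 768])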
    """

    def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs):
        super().__init__(**kwargs)

        self.summary_type = config.summary_type if hasattr(config, "summary_type") else "last"
        if self.summary_type == "attn":
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

        self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj
        if self.has_summary:
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = tf.keras.layers.Dense(
                num_classes, kernel_initializer=get_initializer(initializer_range), name="summary"
            )

        self.has_activation = hasattr(config, "summary_activation") and config.summary_activation == "tanh"
        if self.has_activation:
            self.activation = tf.keras.activations.tanh

        self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0
        if self.has_first_dropout:
            self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout)

        self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0
        if self.has_last_dropout:
            self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout)

    def call(self, inputs, cls_index=None, training=False):
        if not isinstance(inputs, (dict, tuple, list)):
            hidden_states = inputs
        elif isinstance(inputs, (tuple, list)):
            hidden_states = inputs[0]
            cls_index = inputs[1] if len(inputs) > 1 else None
            assert len(inputs) <= 2, "Too many inputs."
        else:
            hidden_states = inputs.get("hidden_states")
            cls_index = inputs.get("cls_index", None)

        if self.summary_type == "last":
            output = hidden_states[:, -1]
        elif self.summary_type == "first":
            output = hidden_states[:, 0]
        elif self.summary_type == "mean":
            output = tf.reduce_mean(hidden_states, axis=1)
        elif self.summary_type == "cls_index":
            hidden_shape = shape_list(hidden_states)  # e.g. [batch, num choices, seq length, hidden dims]
            if cls_index is None:
                cls_index = tf.fill(
                    hidden_shape[:-2], hidden_shape[-2] - 1
                )  # A tensor full of shape [batch] or [batch, num choices] full of sequence length
            cls_shape = shape_list(cls_index)
            if len(cls_shape) <= len(hidden_shape) - 2:
                cls_index = tf.expand_dims(cls_index, axis=-1)
            # else:
            # cls_index = cls_index[..., tf.newaxis]
            # cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
            # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
            output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
            output = tf.squeeze(
                output, axis=len(hidden_shape) - 2
            )  # shape of output: (batch, num choices, hidden_size)
        elif self.summary_type == "attn":
            raise NotImplementedError

        if self.has_first_dropout:
            output = self.first_dropout(output, training=training)

        if self.has_summary:
            output = self.summary(output)

        if self.has_activation:
            output = self.activation(output)

        if self.has_last_dropout:
            output = self.last_dropout(output, training=training)

        return output


def shape_list(tensor: tf.Tensor) -> List[int]:
    """
    Deal with dynamic shape in tensorflow cleanly.

    Args:
        tensor (:obj:`tf.Tensor`): The tensor we want the shape of.

    Returns:
        :obj:`List[int]`: The shape of the tensor as a list.
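
    Example (illustrative)::

        >>> import tensorflow as tf
        >>> x = tf.ones((2, 5, 768))
        >>> shape_list(x)  # dims unknown at trace time would come back as scalar tensors
        [2, 5, 768]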
    """
    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:
    """
    Creates a :obj:`tf.initializers.TruncatedNormal` with the given range.

    Args:
        initializer_range (`float`, defaults to 0.02): Standard deviation of the truncated normal initializer.

    Returns:
        :obj:`tf.initializers.TruncatedNormal`: The truncated normal initializer.
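
    Example (illustrative)::

        >>> import tensorflow as tf
        >>> dense = tf.keras.layers.Dense(10, kernel_initializer=get_initializer(0.02))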
    """
    return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)


class TFWrappedEmbeddings:
    """
    This class wraps the :obj:`TFSharedEmbeddings` layer in a plain Python (non-Keras-layer) class to avoid problems
    with weight restoring. It also makes sure that the layer is called from the correct scope so that the correct
    weights are saved and restored.
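
    Example (a minimal illustrative sketch; the sizes are arbitrary assumptions)::

        >>> import tensorflow as tf
        >>> shared = TFSharedEmbeddings(vocab_size=100, hidden_size=16, name="shared")
        >>> wrapped = TFWrappedEmbeddings(shared)
        >>> embeddings = wrapped(tf.constant([[1, 2, 3]]), mode="embedding")  # reuses ``shared``'s weights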
    """

    def __init__(self, layer, abs_scope_name=None):
        self._layer = layer
        self._abs_scope_name = abs_scope_name

    def call(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer.call(inputs, mode)

        # if an abs scope name is given to the embedding variable, call variable from absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer.call(inputs, mode)

    def __call__(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer(inputs, mode)

        # if an abs scope name is given to the embedding variable, call variable from absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer(inputs, mode)