# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF general model utils."""

import functools
import inspect
import os
import pickle
import re
import warnings
from typing import Dict, List, Optional, Union

import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.engine.keras_tensor import KerasTensor
from tensorflow.python.keras.saving import hdf5_format

from huggingface_hub import Repository, list_repo_files

from .configuration_utils import PretrainedConfig
from .file_utils import (
    DUMMY_INPUTS,
    TF2_WEIGHTS_NAME,
    WEIGHTS_NAME,
    ModelOutput,
    PushToHubMixin,
    cached_path,
    copy_func,
    hf_bucket_url,
    is_offline_mode,
    is_remote_url,
)
from .generation_tf_utils import TFGenerationMixin
from .modeling_tf_outputs import TFSeq2SeqLMOutput
from .tokenization_utils_base import BatchEncoding
from .utils import logging


logger = logging.get_logger(__name__)
tf_logger = tf.get_logger()

TFModelInputType = Union[
    List[tf.Tensor],
    List[np.ndarray],
    List[KerasTensor],
    Dict[str, tf.Tensor],
    Dict[str, np.ndarray],
    Dict[str, KerasTensor],
    tf.Tensor,
    np.ndarray,
    KerasTensor,
]


def dummy_loss(y_true, y_pred):
    return tf.reduce_mean(y_pred)


class TFModelUtilsMixin:
    """
    A few utilities for :obj:`tf.keras.Model`, to be used as a mixin.
    """

    def num_parameters(self, only_trainable: bool = False) -> int:
        """
        Get the number of (optionally, trainable) parameters in the model.

        Args:
            only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return only the number of trainable parameters

        Returns:
            :obj:`int`: The number of parameters.
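
        Example (illustrative sketch; ``model`` stands for any instantiated model that uses this mixin)::

            total_params = model.num_parameters()
            trainable_params = model.num_parameters(only_trainable=True)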
        """
        if only_trainable:
            return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables))
        else:
            return self.count_params()


def keras_serializable(cls):
    """
    Decorate a Keras Layer class to support Keras serialization.

    This is done by:

    1. Adding a :obj:`config` dict to the Keras config dictionary in :obj:`get_config` (called by Keras at
       serialization time).
    2. Wrapping :obj:`__init__` to accept that :obj:`config` dict (passed by Keras at deserialization time) and
       convert it to a config object for the actual layer initializer.
    3. Registering the class as a custom object in Keras (if the TensorFlow version supports this), so that it does not
       need to be supplied in :obj:`custom_objects` in the call to :obj:`tf.keras.models.load_model`.

    Args:
        cls (a :obj:`tf.keras.layers.Layer` subclass):
            Typically a :obj:`TF.MainLayer` class in this project, in general must accept a :obj:`config` argument to
            its initializer.

    Returns:
        The same class object, with modifications for Keras deserialization.
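
    Example (illustrative sketch; ``MyConfig`` and ``TFMyMainLayer`` are hypothetical names used only for
    demonstration)::

        @keras_serializable
        class TFMyMainLayer(tf.keras.layers.Layer):
            config_class = MyConfig

            def __init__(self, config: MyConfig, **kwargs):
                super().__init__(**kwargs)
                self.hidden_size = config.hidden_size

        # The decorated layer serializes its config in `get_config` and can then be restored with
        # `tf.keras.models.load_model` without passing `custom_objects`.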
    """
    initializer = cls.__init__

    config_class = getattr(cls, "config_class", None)
    if config_class is None:
        raise AttributeError("Must set `config_class` to use @keras_serializable")

    @functools.wraps(initializer)
    def wrapped_init(self, *args, **kwargs):
        config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None)

        if isinstance(config, dict):
            config = config_class.from_dict(config)
            initializer(self, config, *args, **kwargs)
        elif isinstance(config, PretrainedConfig):
            if len(args) > 0:
                initializer(self, *args, **kwargs)
            else:
                initializer(self, config, *args, **kwargs)
        else:
            raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)")

        self._config = config
        self._kwargs = kwargs

    cls.__init__ = wrapped_init

    if not hasattr(cls, "get_config"):
        raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses")
    if hasattr(cls.get_config, "_is_default"):

        def get_config(self):
            cfg = super(cls, self).get_config()
            cfg["config"] = self._config.to_dict()
            cfg.update(self._kwargs)
            return cfg

        cls.get_config = get_config

    cls._keras_serializable = True
    if hasattr(tf.keras.utils, "register_keras_serializable"):
        cls = tf.keras.utils.register_keras_serializable()(cls)
    return cls


class TFCausalLanguageModelingLoss:
    """
    Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token.

    .. note::

        Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
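
    Example (illustrative sketch of the ``-100`` masking convention; the tensors below are made up)::

        # logits has shape (batch_size, sequence_length, vocab_size)
        labels = tf.constant([[5, 12, -100], [7, -100, -100]])  # -100 positions are ignored
        per_token_loss = model.compute_loss(labels, logits)  # one loss value per unmasked token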

    """

    def compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        # make sure only labels that are not equal to -100 affect the loss
        active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
        reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
        labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
        return loss_fn(labels, reduced_logits)


class TFQuestionAnsweringLoss:
    """
    Loss function suitable for question answering.
    """

    def compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        start_loss = loss_fn(labels["start_position"], logits[0])
        end_loss = loss_fn(labels["end_position"], logits[1])

        return (start_loss + end_loss) / 2.0


class TFTokenClassificationLoss:
    """
    Loss function suitable for token classification.

    .. note::

        Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.

    """

    def compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        # make sure only labels that are not equal to -100
        # are taken into account as loss
        if tf.math.reduce_any(labels == -1):
            warnings.warn("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.")
            active_loss = tf.reshape(labels, (-1,)) != -1
        else:
            active_loss = tf.reshape(labels, (-1,)) != -100
        reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
        labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)

        return loss_fn(labels, reduced_logits)


class TFSequenceClassificationLoss:
    """
    Loss function suitable for sequence classification.
    """

    def compute_loss(self, labels, logits):
        if len(shape_list(logits)) == 1 or shape_list(logits)[1] == 1:
            loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
        else:
            loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
                from_logits=True, reduction=tf.keras.losses.Reduction.NONE
            )

        return loss_fn(labels, logits)


class TFMultipleChoiceLoss:
    """Loss function suitable for multiple choice tasks."""

    def compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        return loss_fn(labels, logits)


class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss):
    """
    Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens.

    .. note::

         Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
    """


class TFNextSentencePredictionLoss:
    """
    Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence.

    .. note::
         Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
    """

    def compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        # make sure only labels that are not equal to -100
        # are taken into account as loss
        next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
        next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss)
        next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss)

        return loss_fn(next_sentence_label, next_sentence_reduced_logits)


def booleans_processing(config, **kwargs):
    """
    Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or
    graph)

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config of the running model.
        **kwargs:
            The boolean parameters

    Returns:
        A dictionary with the proper values for each boolean
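
    Example (illustrative sketch; the values shown are made up)::

        flags = booleans_processing(
            config,
            output_attentions=None,       # falls back to config.output_attentions
            output_hidden_states=True,    # an explicit value takes precedence in eager mode
            return_dict=None,
        )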
    """
    final_booleans = {}

    if tf.executing_eagerly():
        final_booleans["output_attentions"] = (
            kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
        )
        final_booleans["output_hidden_states"] = (
            kwargs["output_hidden_states"]
            if kwargs["output_hidden_states"] is not None
            else config.output_hidden_states
        )
        final_booleans["return_dict"] = (
            kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
        )

        if "use_cache" in kwargs:
            final_booleans["use_cache"] = (
                kwargs["use_cache"] if kwargs["use_cache"] is not None else getattr(config, "use_cache", None)
            )
    else:
        if (
            kwargs["output_attentions"] not in (None, config.output_attentions)
            or kwargs["output_hidden_states"] not in (None, config.output_hidden_states)
            or ("use_cache" in kwargs and kwargs["use_cache"] not in (None, config.use_cache))
        ):
            tf_logger.warning(
                "The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model. "
                "They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)."
            )

        final_booleans["output_attentions"] = config.output_attentions
        final_booleans["output_hidden_states"] = config.output_hidden_states

        if kwargs.get("return_dict", None) not in (None, True):
            tf_logger.warning(
                "The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."
            )
        final_booleans["return_dict"] = True

        if "use_cache" in kwargs:
            final_booleans["use_cache"] = getattr(config, "use_cache", None)

    return final_booleans


def input_processing(func, config, input_ids, **kwargs):
    """
    Process the input of each TensorFlow model, including the booleans. In case of a list of symbolic inputs, each
    input has to be named according to its parameter name, i.e. `input_ids = tf.keras.Input(shape=(128,),
    dtype='int32', name="input_ids")`, otherwise the order of the tensors is not guaranteed during training.

    Args:
        func (:obj:`callable`):
            The callable function of the TensorFlow model.
        config (:class:`~transformers.PretrainedConfig`):
            The config of the running model.
        **kwargs:
            The inputs of the model.

    Returns:
        A dictionary with the parsed inputs, one entry per parameter name accepted by :obj:`func`.
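
    Example (illustrative sketch of how a model's ``call`` method typically delegates to this helper; ``self`` refers
    to a hypothetical model)::

        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            attention_mask=attention_mask,
            kwargs_call=kwargs,
        )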
    """
    signature = dict(inspect.signature(func).parameters)
    signature.pop("kwargs", None)
    signature.pop("self", None)
    parameter_names = list(signature.keys())
    output = {}
    allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray, KerasTensor)

    if "inputs" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
            FutureWarning,
        )

        output["input_ids"] = kwargs["kwargs_call"].pop("inputs")

    if "decoder_cached_states" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
            FutureWarning,
        )
        output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")

    if "past" in kwargs["kwargs_call"] and "past_key_values" in kwargs:
        warnings.warn(
            "The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
            FutureWarning,
        )
        kwargs["past_key_values"] = kwargs["kwargs_call"].pop("past")
    elif "past_key_values" in kwargs["kwargs_call"] and "past" in kwargs:
        kwargs["past"] = kwargs["kwargs_call"].pop("past_key_values")

    if len(kwargs["kwargs_call"]) > 0:
        raise ValueError(
            f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}."
        )

    kwargs.pop("kwargs_call")

    for k, v in kwargs.items():
        if isinstance(v, allowed_types) or v is None:
            output[k] = v
        else:
            raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")

    if isinstance(input_ids, (tuple, list)):
        for i, input in enumerate(input_ids):
            # EagerTensors don't allow the use of the .name property, so we check for a symbolic Tensor explicitly
            if type(input) == tf.Tensor:
                # Tensor names always follow the pattern `name:id`, so we check only the
                # `name` part
                tensor_name = input.name.split(":")[0]

                if tensor_name in parameter_names:
                    output[tensor_name] = input
                else:
                    output[parameter_names[i]] = input
            elif isinstance(input, allowed_types) or input is None:
                output[parameter_names[i]] = input
            else:
                raise ValueError(
                    f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}."
                )
    elif isinstance(input_ids, (dict, BatchEncoding)):
        if "inputs" in input_ids:
            warnings.warn(
                "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
                FutureWarning,
            )

            output["input_ids"] = input_ids.pop("inputs")

        if "decoder_cached_states" in input_ids:
            warnings.warn(
                "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            output["past_key_values"] = input_ids.pop("decoder_cached_states")

        for k, v in dict(input_ids).items():
            if isinstance(v, allowed_types) or v is None:
                output[k] = v
            elif k not in parameter_names and "args" not in parameter_names:
                logger.warning(
                    f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored."
                )
                continue
            else:
                raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
    else:
        if isinstance(input_ids, (tf.Tensor, KerasTensor)) or input_ids is None:
            output[parameter_names[0]] = input_ids
        else:
            raise ValueError(
                f"Data of type {type(input_ids)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}."
            )

    for name in parameter_names:
        if name not in list(output.keys()) and name != "args":
            output[name] = kwargs.pop(name, signature[name].default)

    # When creating a SavedModel, TF calls the method with LayerCall.__call__(args, **kwargs),
    # so we have to handle the positional `args` argument as a special case to keep the output consistent
    if "args" in output:
        if output["args"] is not None and type(output["args"]) == tf.Tensor:
            tensor_name = output["args"].name.split(":")[0]
            output[tensor_name] = output["args"]
        else:
            # `args` in this case is always the first parameter, then `input_ids`
            output["input_ids"] = output["args"]

        del output["args"]

    if "kwargs" in output:
        del output["kwargs"]

    boolean_dict = {
        k: v
        for k, v in output.items()
        if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
    }

    output.update(
        booleans_processing(
            config=config,
            **boolean_dict,
        )
    )

    return output


def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
    """
    Detect missing and unexpected layers and load the TF weights according to their names and shapes.

    Args:
        model (:obj:`tf.keras.models.Model`):
            The model to load the weights into.
        resolved_archive_file (:obj:`str`):
            The location of the H5 file.
        ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to ignore weights whose shapes don't match between the checkpoint and the model.

    Returns:
        Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the
        mismatched layers.
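
    Example (illustrative sketch; ``model`` and the checkpoint path are placeholders)::

        missing, unexpected, mismatched = load_tf_weights(model, "path/to/tf_model.h5")
        if missing:
            logger.warning(f"Some weights were not found in the checkpoint: {missing}")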
    """
    missing_layers = []
    unexpected_layers = []
    mismatched_layers = []

    # Read the H5 file
    with h5py.File(resolved_archive_file, "r") as f:
        # Retrieve the name of each layer from the H5 file
        saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))

        # Find the missing layers from the high level list of layers
        missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)

        # Find the unexpected layers from the high level list of layers
        unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))
        saved_weight_names_set = set()
        symbolic_weights_names = set()
        weight_value_tuples = []

        # Compute missing and unexpected sub layers
        # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
        for layer in model.layers:
            # if layer_name from the H5 file belongs to the layers from the instantiated model
            if layer.name in saved_h5_model_layers_name:
                # Get the H5 layer object from its name
                h5_layer_object = f[layer.name]
                # Get all the weights as a list from the layer object
                symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
                saved_weights = {}

                # Create a dict from the H5 saved model that looks like {"weight_name": weight_value}
                # And a set with only the names
                for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"):
                    # TF names always start with the model name so we ignore it
                    name = "/".join(weight_name.split("/")[1:])

                    if _prefix is not None:
                        name = _prefix + "/" + name

                    saved_weights[name] = np.asarray(h5_layer_object[weight_name])

                    # Add the updated name to the final list for computing missing/unexpected values
                    saved_weight_names_set.add(name)

                # Loop over each weights from the instantiated model and compare with the weights from the H5 file
                for symbolic_weight in symbolic_weights:
                    # TF names always start with the model name so we ignore it
                    if _prefix is not None:
                        delimiter = len(_prefix.split("/"))
                        symbolic_weight_name = "/".join(
                            symbolic_weight.name.split("/")[:delimiter]
                            + symbolic_weight.name.split("/")[delimiter + 1 :]
                        )
                    else:
                        symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:])

                    # here we check if the current weight is among the weights from the H5 file
                    # If yes, get the weight_value of the corresponding weight from the H5 file
                    # If not, set the value to None
                    saved_weight_value = saved_weights.get(symbolic_weight_name, None)

                    # Add the updated name to the final list for computing missing/unexpected values
                    symbolic_weights_names.add(symbolic_weight_name)

                    # If the current weight is found
                    if saved_weight_value is not None:
                        # Check if the shape of the current weight and the one from the H5 file are different
                        if K.int_shape(symbolic_weight) != saved_weight_value.shape:
                            # If yes we reshape the weight from the H5 file to match the current weight
                            # If the two shapes are not compatible we raise an error
                            try:
                                array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
                            except ValueError as e:
                                if ignore_mismatched_sizes:
                                    mismatched_layers.append(
                                        (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
                                    )
                                    continue
                                else:
                                    raise e
                        else:
                            array = saved_weight_value

                        # We create the tuple that will be loaded and add it to the final list
                        weight_value_tuples.append((symbolic_weight, array))

    # Load all the weights
    K.batch_set_value(weight_value_tuples)

    # Compute the missing and unexpected layers
    missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))
    unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))

    return missing_layers, unexpected_layers, mismatched_layers


def init_copy_embeddings(old_embeddings, new_num_tokens):
    r"""
    This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case
    new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be
    kept or not. Example:

        - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4]

            -  mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1]
        - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5]

            - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4]
    """
    old_num_tokens, old_embedding_dim = shape_list(old_embeddings)
    size_diff = new_num_tokens - old_num_tokens

    # initialize new embeddings
    # Copy token embeddings from the previous ones
    if tf.math.greater(size_diff, 0):
        # if the new size is greater than the old one, we pad the current embeddings up to the new size, and we create
        # a mask to properly identify the padded values so they can be replaced by the values of the newly created
        # embeddings
        current_weights = tf.pad(
            old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1
        )
        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
        mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True)
        mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False)
    else:
        # if the new size is lower than the old one, we take the current embeddings up to the new size
        current_weights = tf.slice(
            old_embeddings.value(),
            tf.convert_to_tensor([0, 0]),
            tf.convert_to_tensor([new_num_tokens, old_embedding_dim]),
        )
        mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True)

    return mask, current_weights


class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin):
    r"""
    Base class for all TF models.

    :class:`~transformers.TFPreTrainedModel` takes care of storing the configuration of the models and handles methods
    for loading, downloading and saving models as well as a few methods common to all models to:

        * resize the input embeddings,
        * prune heads in the self-attention layers.

    Class attributes (overridden by derived classes):

        - **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
          :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
        - **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
          derived classes of the same architecture adding modules on top of the base model.
    """
    config_class = None
    base_model_prefix = ""
    # a list of re pattern of tensor names to ignore from the model when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_missing = None
    # a list of re pattern of tensor names to ignore from the weights when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_unexpected = None
    _requires_load_weight_prefix = False

    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """
        Dummy inputs to build the network.

        Returns:
            :obj:`Dict[str, tf.Tensor]`: The dummy inputs.
        """
        return {
            "input_ids": tf.constant(DUMMY_INPUTS),
        }

    @property
    def framework(self) -> str:
        """
        :str: Identifies that this is a TensorFlow model.
        """
        return "tf"

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
                "`PretrainedConfig`. To create a model from a pretrained model use "
                f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # Save config and origin of the pretrained weights if given in model
        self.config = config
        self.name_or_path = config.name_or_path

    def get_config(self):
        return self.config.to_dict()

    @classmethod
    def from_config(cls, config, **kwargs):
        if isinstance(config, PretrainedConfig):
            return cls._from_config(config, **kwargs)
        return cls._from_config(cls.config_class.from_dict(config, **kwargs))

    @classmethod
    def _from_config(cls, config, **kwargs):
        """
        All context managers that the model should be initialized under go here.
        """
        return cls(config, **kwargs)

    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
                "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
            }
        ]
    )
    def serving(self, inputs):
        """
        Method used for serving the model.

        Args:
            inputs (:obj:`Dict[str, tf.Tensor]`):
                The input of the saved model as a dictionary of tensors.
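
        Example (illustrative sketch of exporting the default serving signature; the path is a placeholder)::

            model.save("saved_model/1", include_optimizer=False, signatures=model.serving)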
        """
        output = self.call(inputs)

        return self.serving_output(output)

    def serving_output(self, output):
        """
        Prepare the output of the saved model. Each model must implement this function.

        Args:
            output (:class:`~transformers.TFBaseModelOutput`):
                The output returned by the model.
        """
        raise NotImplementedError

    def get_input_embeddings(self) -> tf.keras.layers.Layer:
        """
        Returns the model's input embeddings layer.

        Returns:
            :obj:`tf.Variable`: The embeddings layer mapping vocabulary to hidden states.
        """
        main_layer = getattr(self, self.base_model_prefix, self)

        if main_layer is not self:
            return main_layer.get_input_embeddings()
        else:
            raise NotImplementedError

    def _save_checkpoint(self, checkpoint_dir, epoch):
        if not os.path.isdir(checkpoint_dir):
            os.mkdir(checkpoint_dir)
        # We avoid tf.train.checkpoint or saving weights in TF format, even though that includes optimizer
        # state for us, because it requires special handling for objects like custom losses, which we use
        # internally and which users are likely to use too
        weights_path = os.path.join(checkpoint_dir, "weights.h5")
        self.save_weights(weights_path)
        extra_data = {"epoch": epoch, "optimizer_state": self.optimizer.get_weights()}
        extra_data_path = os.path.join(checkpoint_dir, "extra_data.pickle")
        with open(extra_data_path, "wb") as f:
            pickle.dump(extra_data, f)

    def load_repo_checkpoint(self, repo_path_or_name):
        """
        Loads a saved checkpoint (model weights and optimizer state) from a repo. Returns the current epoch count when
        the checkpoint was made.

        Args:
            repo_path_or_name (:obj:`str`):
                Can either be a repository name for your {object} in the Hub or a path to a local folder (in which case
                the repository will have the name of that local folder).

        Returns:
            :obj:`dict`: A dictionary of extra metadata from the checkpoint, most commonly an "epoch" count.
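
        Example (illustrative sketch; the repo name and dataset are placeholders)::

            model.compile(optimizer="adam")
            checkpoint_data = model.load_repo_checkpoint("my-username/my-finetuned-model")
            model.fit(tf_dataset, epochs=10, initial_epoch=checkpoint_data["epoch"])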
        """
        if getattr(self, "optimizer", None) is None:
            raise RuntimeError(
                "Checkpoint loading failed as no optimizer is attached to the model. "
                "This is most likely caused by the model not being compiled."
            )
        if not os.path.isdir(repo_path_or_name):
            # If this isn't a local path, check that the remote repo exists and has a checkpoint in it
            repo_files = list_repo_files(repo_path_or_name)
            for file in ("checkpoint/weights.h5", "checkpoint/extra_data.pickle"):
                if file not in repo_files:
                    raise FileNotFoundError(f"Repo {repo_path_or_name} does not contain checkpoint file {file}!")
            if "/" not in repo_path_or_name:
                model_id = repo_path_or_name
                repo_path_or_name = self.get_full_repo_name(repo_path_or_name)
            else:
                model_id = repo_path_or_name.split("/")[-1]
            repo = Repository(model_id, clone_from=f"https://huggingface.co/{repo_path_or_name}")
            local_dir = repo.local_dir
        else:
            local_dir = repo_path_or_name

        # Now make sure the repo actually has a checkpoint in it.
        checkpoint_dir = os.path.join(local_dir, "checkpoint")
        weights_file = os.path.join(checkpoint_dir, "weights.h5")
        if not os.path.isfile(weights_file):
            raise FileNotFoundError(f"Could not find checkpoint file weights.h5 in repo {repo_path_or_name}!")
        extra_data_file = os.path.join(checkpoint_dir, "extra_data.pickle")
        if not os.path.isfile(extra_data_file):
            raise FileNotFoundError(f"Could not find checkpoint file extra_data.pickle in repo {repo_path_or_name}!")

        # Assuming the repo is real and we got a checkpoint, load the weights and the optimizer state into the model.
        # The optimizer state includes the iteration count, so learning rate schedules should resume as normal too.
        self.load_weights(weights_file)
        with open(extra_data_file, "rb") as f:
            extra_data = pickle.load(f)
        self.optimizer.set_weights(extra_data["optimizer_state"])

        # Finally, return the epoch number from the checkpoint. This isn't a property of the model, so we can't
        # set it directly, but the user can pass it to fit().
        return {"epoch": extra_data["epoch"]}

    def compile(
        self,
        optimizer="rmsprop",
        loss="passthrough",
        metrics=None,
        loss_weights=None,
        weighted_metrics=None,
        run_eagerly=None,
        steps_per_execution=None,
        **kwargs
    ):
        """
        This is a thin wrapper that sets the model's loss output head as the loss if the user does not specify a loss
        function themselves.
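
        Example (illustrative sketch; ``train_dataset`` is a placeholder and the labels are expected under the
        ``"labels"`` key of the input dict)::

            model.compile(optimizer="adam")  # no loss passed -> the model's internal loss output is used
            model.fit(train_dataset, epochs=3)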
        """
        if loss == "passthrough":
            logger.warning(
                "No loss specified in compile() - the model's internal loss computation will be used as the "
                "loss. Don't panic - this is a common way to train TensorFlow models in Transformers! "
                "Please ensure your labels are passed as the 'labels' key of the input dict so that they are "
                "accessible to the model during the forward pass. To disable this behaviour, please pass a "
                "loss argument, or explicitly pass loss=None if you do not want your model to compute a loss."
            )
            loss = {"loss": dummy_loss}
        super().compile(
            optimizer=optimizer,
            loss=loss,
            metrics=metrics,
            loss_weights=loss_weights,
            weighted_metrics=weighted_metrics,
            run_eagerly=run_eagerly,
            steps_per_execution=steps_per_execution,
            **kwargs,
        )

    def train_step(self, data):
        """
        A modification of Keras's default train_step that cleans up the printed metrics when we use a dummy loss.
        """
        # These are the only transformations `Model.fit` applies to user-input
        # data when a `tf.data.Dataset` is provided.
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
        # These next two lines differ from the base method - they avoid issues when the labels are in
        # the input dict (and loss is computed internally)
        if y is None and "labels" in x:
            y = x["labels"]  # Stops confusion with metric computations
        # Run forward pass.
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)
            loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)
        # Run backwards pass.
        self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
        # When y_pred is a ModelOutput and y is a tf.Tensor the metrics update
        # should be done only with the relevant ModelOutput param that is
        # considered by the loss.
        if isinstance(y_pred, TFSeq2SeqLMOutput) and isinstance(y, tf.Tensor):
            y_pred = y_pred["logits"]
        self.compiled_metrics.update_state(y, y_pred, sample_weight)
        # Collect metrics to return
        return_metrics = {}
        for metric in self.metrics:
            result = metric.result()
            if isinstance(result, dict):
                return_metrics.update(result)
            else:
                return_metrics[metric.name] = result
        # These next two lines are also not in the base method - they correct the displayed metrics
        # when we're using a dummy loss, to avoid a bogus "loss_loss" value being shown.
        if "loss" in return_metrics and "loss_loss" in return_metrics:
            del return_metrics["loss_loss"]
        return return_metrics

    def test_step(self, data):
        """
        A modification of Keras's default test_step that cleans up the printed metrics when we use a dummy loss.
        """
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
        # These next two lines differ from the base method - they avoid issues when the labels are in
        # the input dict (and loss is computed internally)
        if y is None and "labels" in x:
            y = x["labels"]  # Stops confusion with metric computations
        y_pred = self(x, training=False)
        self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)
        # Updates stateful loss metrics.
        if isinstance(y_pred, TFSeq2SeqLMOutput) and isinstance(y, tf.Tensor):
            y_pred = y_pred["logits"]
        self.compiled_metrics.update_state(y, y_pred, sample_weight)
        # Collect metrics to return
        return_metrics = {}
        for metric in self.metrics:
            result = metric.result()
            if isinstance(result, dict):
                return_metrics.update(result)
            else:
                return_metrics[metric.name] = result
        # These next two lines are also not in the base method - they correct the displayed metrics
        # when we're using a dummy loss, to avoid a bogus "loss_loss" value being shown.
        if "loss" in return_metrics and "loss_loss" in return_metrics:
            del return_metrics["loss_loss"]
        return return_metrics

    def set_input_embeddings(self, value):
        """
        Set model's input embeddings

        Args:
            value (:obj:`tf.Variable`):
                The new weights mapping hidden states to vocabulary.
        """
        main_layer = getattr(self, self.base_model_prefix)

        if main_layer is None:
            raise NotImplementedError("The model does not implement the base_model_prefix attribute.")

        try:
            main_layer.set_input_embeddings(value)
        except AttributeError:
            logger.info("Building the model")
            self(self.dummy_inputs)
            main_layer.set_input_embeddings(value)

    def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]:
        """
        Returns the model's output embeddings

        Returns:
            :obj:`tf.Variable`: The weights mapping hidden states to the vocabulary.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()

            try:
                return lm_head.get_output_embeddings()
            except AttributeError:
                logger.info("Building the model")
                self(self.dummy_inputs)

                return lm_head.get_output_embeddings()

        return None  # Overwrite for models with output embeddings

    def set_output_embeddings(self, value):
        """
        Set model's output embeddings

        Args:
            value (:obj:`tf.Variable`):
                The new weights mapping hidden states to vocabulary.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()
            try:
                lm_head.set_output_embeddings(value)
            except AttributeError:
                logger.info("Building the model")
                self(self.dummy_inputs)
                lm_head.set_output_embeddings(value)

    def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:
        """
        Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the
        embeddings.

        Return:
            :obj:`tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model.
        """
        warnings.warn(
            "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning
        )
        return self.get_lm_head()

    def get_prefix_bias_name(self) -> Union[None, str]:
        """
        Get the concatenated _prefix name of the bias from the model name to the parent layer

        Return:
            :obj:`str`: The _prefix name of the bias.
        """
        warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
        return None

    def get_bias(self) -> Union[None, Dict[str, tf.Variable]]:
        """
        Dict of bias attached to an LM head. The key represents the name of the bias attribute.

        Return:
            :obj:`tf.Variable`: The weights representing the bias, None if not an LM model.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()
            try:
                return lm_head.get_bias()
            except AttributeError:
                self(self.dummy_inputs)

                return lm_head.get_bias()
        return None

    def set_bias(self, value):
        """
        Set all the bias in the LM head.

        Args:
            value (:obj:`Dict[tf.Variable]`):
                All the new bias attached to an LM head.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()
            try:
                lm_head.set_bias(value)
            except AttributeError:
                self(self.dummy_inputs)
                lm_head.set_bias(value)

    def get_lm_head(self) -> tf.keras.layers.Layer:
        """
        The LM Head layer. This method must be overwritten by all the models that have an LM head.

        Return:
            :obj:`tf.keras.layers.Layer`: The LM head layer if the model has one, None if not.
        """
        return None

    def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable:
        """
        Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.

        Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.

        Arguments:
            new_num_tokens (:obj:`int`, `optional`):
                The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
                vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
                just returns a pointer to the input tokens :obj:`tf.Variable` module of the model without doing
                anything.

        Return:
            :obj:`tf.Variable`: Pointer to the input tokens Embeddings Module of the model.
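
        Example (illustrative sketch; the new vocabulary size is arbitrary)::

            new_embeddings = model.resize_token_embeddings(new_num_tokens=32128)
            print(new_embeddings.shape)  # (32128, hidden_size)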
        """
        if new_num_tokens is None or new_num_tokens == self.config.vocab_size:
            return self._get_word_embedding_weight(self.get_input_embeddings())

        model_embeds = self._resize_token_embeddings(new_num_tokens)

        # Update base model and current model config
        self.config.vocab_size = new_num_tokens

        return model_embeds

    def _get_word_embedding_weight(model, embedding_layer):
        embeds = getattr(embedding_layer, "weight", None)
        if embeds is not None:
            return embeds

        embeds = getattr(embedding_layer, "decoder", None)
        if embeds is not None:
            return embeds

        # The reason why the attributes don't exist might be
        # because the model is not built, so retry getting
        # the argument after building the model
        model(model.dummy_inputs)

        embeds = getattr(embedding_layer, "weight", None)
        if embeds is not None:
            return embeds

        embeds = getattr(embedding_layer, "decoder", None)
        if embeds is not None:
            return embeds

        return None

    def _resize_token_embeddings(self, new_num_tokens):
        old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings())
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)

        # if word embeddings are not tied, make sure that lm head bias is resized as well
        if self.get_bias() is not None:
            old_lm_head_bias = self.get_bias()
            new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens)

            self.set_bias(new_lm_head_bias)

        # if word embeddings are not tied, make sure that lm head decoder is resized as well
        if self.get_output_embeddings() is not None:
            old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings())
            new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens)

            self.set_output_embeddings(new_lm_head_decoder)

        self.set_input_embeddings(new_embeddings)

        return self.get_input_embeddings()

    def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens):
        """
        Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end.
        Reducing the size will remove vectors from the end

        Args:
            old_lm_head_bias (:obj:`tf.Variable`):
                Old lm head bias to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns None

        Return:
            :obj:`tf.Variable`: Pointer to the resized bias.
        """
        new_lm_head_bias = {}

        for attr, weight in old_lm_head_bias.items():
            first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)
            size_diff = new_num_tokens - old_num_tokens
            final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens]

            # initialize new bias
            if tf.math.greater(size_diff, 0):
                padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]
                current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1)
                num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
                mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy]
                bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True)
                bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False)
            else:
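                # when shrinking, keep only the first `new_num_tokens` entries of the old bias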
                slice_from = [0] if first_dim is None else [0, 0]
                current_bias = tf.slice(
                    weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape)
                )
                bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True)
            new_bias = self.add_weight(
                shape=final_shape,
                initializer="zeros",
                trainable=True,
                name=weight.name.split(":")[0],
            )
            init_bias = tf.where(bias_mask, current_bias, new_bias.value())

            new_bias.assign(init_bias)
            new_lm_head_bias[attr] = new_bias

        return new_lm_head_bias
    def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens):
        """
        Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end.
        Reducing the size will remove vectors from the end
        Args:
            old_lm_head_decoder (:obj:`tf.Variable`):
                Old lm head decoder to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns None

        Return:
            :obj:`tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the
            input ones.
        """
        new_lm_head_decoder = old_lm_head_decoder
        is_input_output_equals = tf.reduce_any(
            self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder
        )
        if old_lm_head_decoder is not None and not is_input_output_equals:
            old_embedding_dim = shape_list(old_lm_head_decoder)[1]
            decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens)
            new_lm_head_decoder = self.add_weight(
                shape=(new_num_tokens, old_embedding_dim),
                initializer="zeros",
                trainable=True,
                name=old_lm_head_decoder.name.split(":")[0],
            )
            init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value())

            new_lm_head_decoder.assign(init_decoder)
        return new_lm_head_decoder
    def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:
        """
        Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly
        initialized vectors at the end. Reducing the size will remove vectors from the end
        Args:
            old_embeddings (:obj:`tf.Variable`):
                Old embeddings to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the embedding matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`tf.Variable` module of the model without doing anything.

        Return:
            :obj:`tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if
            :obj:`new_num_tokens` is :obj:`None`
        """
        old_embedding_dim = shape_list(old_embeddings)[1]
        init_range = getattr(self.config, "initializer_range", 0.02)
        embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)
        new_embeddings = self.add_weight(
            name=old_embeddings.name.split(":")[0],
            shape=[new_num_tokens, old_embedding_dim],
            initializer=get_initializer(init_range),
            dtype=tf.float32,
        )
        init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())

        new_embeddings.assign(init_embeddings)

        return new_embeddings

    def prune_heads(self, heads_to_prune):
        """
        Prunes heads of the base model.
        Arguments:
            heads_to_prune (:obj:`Dict[int, List[int]]`):
                Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
                heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
                0 and 2 on layer 1 and heads 2 and 3 on layer 2.
        """
        raise NotImplementedError

    def save_pretrained(self, save_directory, saved_model=False, version=1, push_to_hub=False, **kwargs):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        :func:`~transformers.TFPreTrainedModel.from_pretrained` class method.

        Arguments:
            save_directory (:obj:`str`):
                Directory to which to save. Will be created if it doesn't exist.
            saved_model (:obj:`bool`, `optional`, defaults to :obj:`False`):
                If the model has to be saved in saved model format as well or not.
            version (:obj:`int`, `optional`, defaults to 1):
                The version of the saved model. A saved model needs to be versioned in order to be properly loaded by
                TensorFlow Serving as detailed in the official documentation
                https://www.tensorflow.org/tfx/serving/serving_basic
            push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to push your model to the Hugging Face model hub after saving it.

                .. warning::

                    Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with
                    :obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are
                    pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory
                    instead.

            kwargs:
                Additional key word arguments passed along to the
                :meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.
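
        Example (an illustrative sketch of saving and reloading; assumes the ``bert-base-uncased`` checkpoint is
        available)::

            >>> from transformers import TFBertModel
            >>> model = TFBertModel.from_pretrained('bert-base-uncased')
            >>> model.save_pretrained('./my_model_directory/')
            >>> # The model can then be reloaded with `from_pretrained`.
            >>> model = TFBertModel.from_pretrained('./my_model_directory/')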
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo = self._create_or_get_repo(save_directory, **kwargs)

        os.makedirs(save_directory, exist_ok=True)

        if saved_model:
            saved_model_dir = os.path.join(save_directory, "saved_model", str(version))
            self.save(saved_model_dir, include_optimizer=False, signatures=self.serving)
            logger.info(f"Saved model created in {saved_model_dir}")

        # Save configuration file
        self.config.architectures = [self.__class__.__name__[2:]]
        self.config.save_pretrained(save_directory)

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME)
        self.save_weights(output_model_file)
        logger.info(f"Model weights saved in {output_model_file}")

        if push_to_hub:
            url = self._push_to_hub(repo, commit_message=commit_message)
            logger.info(f"Model pushed to the hub in this commit: {url}")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.
        The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.
        The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        Parameters:
            pretrained_model_name_or_path (:obj:`str`, `optional`):
                Can be either:

                    - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
                      a user or organization name, like ``dbmdz/bert-base-german-cased``.
                    - A path to a `directory` containing model weights saved using
                      :func:`~transformers.TFPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
                    - A path or url to a `PyTorch state_dict save file` (e.g, ``./pt_model/pytorch_model.bin``). In
                      this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided
                      as ``config`` argument. This loading path is slower than converting the PyTorch model in a
                      TensorFlow model using the provided conversion scripts and loading the TensorFlow model
                      afterwards.
                    - :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
                      arguments ``config`` and ``state_dict``).
            model_args (sequence of positional arguments, `optional`):
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
            config (:obj:`Union[PretrainedConfig, str]`, `optional`):
                Can be either:

                    - an instance of a class derived from :class:`~transformers.PretrainedConfig`,
                    - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.

                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the `model id` string of a pretrained
                      model).
                    - The model was saved using :func:`~transformers.TFPreTrainedModel.save_pretrained` and is reloaded
                      by supplying the save directory.
                    - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
                      configuration JSON file named `config.json` is found in the directory.
            from_pt: (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Load the model weights from a PyTorch state_dict save file (see docstring of
                ``pretrained_model_name_or_path`` argument).
            ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
                as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
                checkpoint with 3 labels).
            cache_dir (:obj:`str`, `optional`):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
            use_auth_token (:obj:`str` or `bool`, `optional`):
                The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
                generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
            revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
                identifier allowed by git.
            mirror(:obj:`str`, `optional`):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
                Please refer to the mirror site for more information.
            kwargs (remaining dictionary of keyword arguments, `optional`):
                Can be used to update the configuration object (after it has been loaded) and initialize the model (e.g.,
                :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
                automatically loaded:

                    - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
                      underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
                      initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
                      ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
                      with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
                      attribute will be passed to the underlying model's ``__init__`` function.
        .. note::

            Passing :obj:`use_auth_token=True` is required when you want to use a private model.

        Examples::

            >>> from transformers import BertConfig, TFBertModel
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = TFBertModel.from_pretrained('bert-base-uncased')
            >>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
            >>> model = TFBertModel.from_pretrained('./test/saved_model/')
            >>> # Update configuration during loading.
            >>> model = TFBertModel.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> assert model.config.output_attentions == True
            >>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).
            >>> config = BertConfig.from_json_file('./pt_model/my_pt_model_config.json')
            >>> model = TFBertModel.from_pretrained('./pt_model/my_pytorch_model.bin', from_pt=True, config=config)

        """
        config = kwargs.pop("config", None)
        cache_dir = kwargs.pop("cache_dir", None)
        from_pt = kwargs.pop("from_pt", False)
        ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        output_loading_info = kwargs.pop("output_loading_info", False)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        mirror = kwargs.pop("mirror", None)
        load_weight_prefix = kwargs.pop("load_weight_prefix", None)
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)

        user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline
        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                _from_auto=from_auto_class,
                _from_pipeline=from_pipeline,
                **kwargs,
            )
        else:
            model_kwargs = kwargs

        # Load model
        if pretrained_model_name_or_path is not None:
            if os.path.isdir(pretrained_model_name_or_path):
                if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                    # Load from a PyTorch checkpoint in priority if from_pt
                    archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
                    # Load from a TF 2.0 checkpoint
                    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
                else:
                    raise EnvironmentError(
                        f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME]} found in directory "
                        f"{pretrained_model_name_or_path} or `from_pt` set to False"
                    )
            elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
                archive_file = pretrained_model_name_or_path
            elif os.path.isfile(pretrained_model_name_or_path + ".index"):
                archive_file = pretrained_model_name_or_path + ".index"
            else:
                archive_file = hf_bucket_url(
                    pretrained_model_name_or_path,
                    filename=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME),
                    revision=revision,
                    mirror=mirror,
                )

            try:
                # Load from URL or cache if already cached
                resolved_archive_file = cached_path(
                    archive_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                )
            except EnvironmentError as err:
                logger.error(err)
                msg = (
                    f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
                    f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n"
                    f"  (make sure '{pretrained_model_name_or_path}' is not a path to a local directory with something else, in that case)\n\n"
                    f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n"
                )
                raise EnvironmentError(msg)
            if resolved_archive_file == archive_file:
                logger.info(f"loading weights file {archive_file}")
            else:
                logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
        else:
            resolved_archive_file = None

        config.name_or_path = pretrained_model_name_or_path

        # composed models, *e.g.* TFRag, require special treatment when it comes to loading
        # pre-trained weights.
        if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None:
            model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name")

        # Instantiate model.
        model = cls(config, *model_args, **model_kwargs)

        if from_pt:
            from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model

            # Load from a PyTorch checkpoint
            return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)
        # we might need to extend the variable scope for composite models
        if load_weight_prefix is not None:
            with tf.compat.v1.variable_scope(load_weight_prefix):
                model(model.dummy_inputs)  # build the network with dummy inputs
        else:
            model(model.dummy_inputs)  # build the network with dummy inputs
        assert os.path.isfile(resolved_archive_file), f"Error retrieving file {resolved_archive_file}"
        # 'by_name' allows us to do transfer learning by skipping/adding layers
        # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357
        try:
            missing_keys, unexpected_keys, mismatched_keys = load_tf_weights(
                model,
                resolved_archive_file,
                ignore_mismatched_sizes=ignore_mismatched_sizes,
                _prefix=load_weight_prefix,
            )
        except OSError as e:
            try:
                with open(resolved_archive_file) as f:
                    if f.read().startswith("version"):
                        raise OSError(
                            "You seem to have cloned a repository without having git-lfs installed. Please install "
                            "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
                            "you cloned."
                        )
                    else:
                        raise ValueError from e
            except (UnicodeDecodeError, ValueError):
                raise OSError(
                    "Unable to load weights from h5 file. "
                    "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
                )
        model(model.dummy_inputs)  # Make sure restore ops are run
        if cls._keys_to_ignore_on_load_missing is not None:
            for pat in cls._keys_to_ignore_on_load_missing:
                missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

        if cls._keys_to_ignore_on_load_unexpected is not None:
            for pat in cls._keys_to_ignore_on_load_unexpected:
                unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

        if len(unexpected_keys) > 0:
            logger.warning(
                f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when "
                f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
                f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
                f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
                f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
                f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
            )
        else:
            logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")

        if len(missing_keys) > 0:
            logger.warning(
                f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized: {missing_keys}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        elif len(mismatched_keys) == 0:
            logger.warning(
                f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
                f"If your task is similar to the task the model of the checkpoint was trained on, "
                f"you can already use {model.__class__.__name__} for predictions without further training."
            )
        if len(mismatched_keys) > 0:
            mismatched_warning = "\n".join(
                [
                    f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
                    for key, shape1, shape2 in mismatched_keys
                ]
            )
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        if output_loading_info:
            loading_info = {
                "missing_keys": missing_keys,
                "unexpected_keys": unexpected_keys,
                "mismatched_keys": mismatched_keys,
            }
            return model, loading_info

        return model


# To update the docstring, we need to copy the method, otherwise we change the original docstring.
TFPreTrainedModel.push_to_hub = copy_func(TFPreTrainedModel.push_to_hub)
TFPreTrainedModel.push_to_hub.__doc__ = TFPreTrainedModel.push_to_hub.__doc__.format(
    object="model", object_class="TFAutoModel", object_files="model checkpoint"
)


class TFConv1D(tf.keras.layers.Layer):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (:obj:`int`):
            The number of output features.
        nx (:obj:`int`):
            The number of input features.
        initializer_range (:obj:`float`, `optional`, defaults to 0.02):
            The standard deviation to use to initialize the weights.
        kwargs:
            Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
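
    Example (an illustrative sketch; the layer sizes and tensor shapes below are hypothetical)::

        >>> import tensorflow as tf
        >>> layer = TFConv1D(nf=768, nx=256)
        >>> hidden_states = tf.random.normal((2, 10, 256))  # (batch_size, seq_length, nx)
        >>> output = layer(hidden_states)  # (batch_size, seq_length, nf) == (2, 10, 768)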
    """

    def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.nf = nf
        self.nx = nx
        self.initializer_range = initializer_range

    def build(self, input_shape):
        self.weight = self.add_weight(
            "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
        )
        self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())
    def call(self, x):
        bz, sl = shape_list(x)[:2]

        x = tf.reshape(x, [-1, self.nx])
        x = tf.matmul(x, self.weight) + self.bias

        x = tf.reshape(x, [bz, sl, self.nf])

        return x


class TFSharedEmbeddings(tf.keras.layers.Layer):
    r"""
    Construct shared token embeddings.

    The weights of the embedding layer are usually shared with the weights of the linear decoder when doing language
    modeling.

    Args:
        vocab_size (:obj:`int`):
            The size of the vocabulary, e.g., the number of unique tokens.
        hidden_size (:obj:`int`):
            The size of the embedding vectors.
        initializer_range (:obj:`float`, `optional`):
            The standard deviation to use when initializing the weights. If no value is provided, it will default to
            :math:`1/\sqrt{hidden\_size}`.
        kwargs:
            Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
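
    Example (an illustrative sketch; the vocabulary and hidden sizes below are hypothetical)::

        >>> import tensorflow as tf
        >>> shared = TFSharedEmbeddings(vocab_size=100, hidden_size=16)
        >>> token_ids = tf.constant([[5, 7, 9]])
        >>> hidden_states = shared(token_ids, mode="embedding")  # shape (1, 3, 16)
        >>> logits = shared(hidden_states, mode="linear")  # shape (1, 3, 100)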
    """

    def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range

    def build(self, input_shape):
        """
        Build shared token embedding layer. Shared weights logic adapted from
        https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
        """
        self.weight = self.add_weight(
            "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)
        )
        super().build(input_shape)

    def get_config(self):
        config = {
            "vocab_size": self.vocab_size,
            "hidden_size": self.hidden_size,
            "initializer_range": self.initializer_range,
        }
        base_config = super().get_config()

        return dict(list(base_config.items()) + list(config.items()))

    def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor:
        """
        Get token embeddings of inputs or decode final hidden state.

        Args:
            inputs (:obj:`tf.Tensor`):
                In embedding mode, should be an int64 tensor with shape :obj:`[batch_size, length]`.

                In linear mode, should be a float tensor with shape :obj:`[batch_size, length, hidden_size]`.
            mode (:obj:`str`, defaults to :obj:`"embedding"`):
               A valid value is either :obj:`"embedding"` or :obj:`"linear"`, the first one indicates that the layer
               should be used as an embedding layer, the second one that the layer should be used as a linear decoder.

        Returns:
            :obj:`tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape
            :obj:`[batch_size, length, embedding_size]`.

            In linear mode, the output is a float32 with shape :obj:`[batch_size, length, vocab_size]`.
        Raises:
            ValueError: if :obj:`mode` is not valid.

        Shared weights logic is adapted from `here
        <https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24>`__.
        """
        if mode == "embedding":
            return self._embedding(inputs)
        elif mode == "linear":
            return self._linear(inputs)
        else:
            raise ValueError(f"mode {mode} is not valid.")

    def _embedding(self, input_ids):
        """Applies embedding based on inputs tensor."""
        return tf.gather(self.weight, input_ids)

    def _linear(self, inputs):
        """
        Computes logits by running inputs through a linear layer.
        Args:
            inputs: A float32 tensor with shape [..., hidden_size]

        Returns:
            float32 tensor with shape [..., vocab_size].
        """
        first_dims = shape_list(inputs)[:-1]
        x = tf.reshape(inputs, [-1, self.hidden_size])
        logits = tf.matmul(x, self.weight, transpose_b=True)

        return tf.reshape(logits, first_dims + [self.vocab_size])


class TFSequenceSummary(tf.keras.layers.Layer):
    """
    Compute a single vector summary of a sequence's hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
            config class of your model for the default values it uses):

            - **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:

                - :obj:`"last"` -- Take the last token hidden state (like XLNet)
                - :obj:`"first"` -- Take the first token hidden state (like Bert)
                - :obj:`"mean"` -- Take the mean of all tokens hidden states
                - :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
                - :obj:`"attn"` -- Not implemented now, use multi-head attention

            - **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
            - **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
              :obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
            - **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
              output, another string or :obj:`None` will add no activation.
            - **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
              activation.
            - **summary_last_dropout** (:obj:`float`)-- Optional dropout probability after the projection and
              activation.

        initializer_range (:obj:`float`, defaults to 0.02): The standard deviation to use to initialize the weights.
        kwargs:
            Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
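
    Example (an illustrative sketch; assumes a GPT-2 style configuration, which uses the ``"cls_index"`` summary)::

        >>> import tensorflow as tf
        >>> from transformers import GPT2Config
        >>> config = GPT2Config()
        >>> summary = TFSequenceSummary(config, initializer_range=config.initializer_range)
        >>> hidden_states = tf.random.normal((2, 10, config.hidden_size))
        >>> output = summary(hidden_states)  # one summary vector per sequence in the batch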
    """

    def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs):
        super().__init__(**kwargs)

        self.summary_type = config.summary_type if hasattr(config, "summary_use_proj") else "last"
        if self.summary_type == "attn":
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

        self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj
        if self.has_summary:
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = tf.keras.layers.Dense(
                num_classes, kernel_initializer=get_initializer(initializer_range), name="summary"
            )

        self.has_activation = hasattr(config, "summary_activation") and config.summary_activation == "tanh"
        if self.has_activation:
            self.activation = tf.keras.activations.tanh

        self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0
        if self.has_first_dropout:
            self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout)

        self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0
        if self.has_last_dropout:
            self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout)

    def call(self, inputs, cls_index=None, training=False):
        if not isinstance(inputs, (dict, tuple, list)):
            hidden_states = inputs
        elif isinstance(inputs, (tuple, list)):
            hidden_states = inputs[0]
            cls_index = inputs[1] if len(inputs) > 1 else None
            assert len(inputs) <= 2, "Too many inputs."
        else:
            hidden_states = inputs.get("hidden_states")
            cls_index = inputs.get("cls_index", None)

        if self.summary_type == "last":
            output = hidden_states[:, -1]
        elif self.summary_type == "first":
            output = hidden_states[:, 0]
        elif self.summary_type == "mean":
            output = tf.reduce_mean(hidden_states, axis=1)
        elif self.summary_type == "cls_index":
            hidden_shape = shape_list(hidden_states)  # e.g. [batch, num choices, seq length, hidden dims]
            if cls_index is None:
                cls_index = tf.fill(
                    hidden_shape[:-2], hidden_shape[-2] - 1
                )  # A tensor full of shape [batch] or [batch, num choices] full of sequence length
            cls_shape = shape_list(cls_index)
            if len(cls_shape) <= len(hidden_shape) - 2:
                cls_index = tf.expand_dims(cls_index, axis=-1)
            # else:
            # cls_index = cls_index[..., tf.newaxis]
            # cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
            # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
            output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
            output = tf.squeeze(
                output, axis=len(hidden_shape) - 2
            )  # shape of output: (batch, num choices, hidden_size)
        elif self.summary_type == "attn":
            raise NotImplementedError

        if self.has_first_dropout:
            output = self.first_dropout(output, training=training)

        if self.has_summary:
            output = self.summary(output)

        if self.has_activation:
            output = self.activation(output)

        if self.has_last_dropout:
            output = self.last_dropout(output, training=training)

        return output

def shape_list(tensor: tf.Tensor) -> List[int]:
    """
    Deal with dynamic shape in tensorflow cleanly.

    Args:
        tensor (:obj:`tf.Tensor`): The tensor we want the shape of.

    Returns:
        :obj:`List[int]`: The shape of the tensor as a list.
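
    Example (illustrative)::

        >>> import tensorflow as tf
        >>> dims = shape_list(tf.ones((2, 3)))  # [2, 3] when the shape is fully static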
    """
    dynamic = tf.shape(tensor)

    if tensor.shape == tf.TensorShape(None):
        return dynamic

    static = tensor.shape.as_list()

    return [dynamic[i] if s is None else s for i, s in enumerate(static)]


def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:
    """
    Creates a :obj:`tf.initializers.TruncatedNormal` with the given range.

    Args:
        initializer_range (`float`, defaults to 0.02): Standard deviation of the initializer range.

    Returns:
        :obj:`tf.initializers.TruncatedNormal`: The truncated normal initializer.
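
    Example (illustrative)::

        >>> import tensorflow as tf
        >>> initializer = get_initializer(0.02)
        >>> dense = tf.keras.layers.Dense(8, kernel_initializer=initializer)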
    """
    return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)


class TFWrappedEmbeddings:
    """
    This class wraps the :obj:`TFSharedEmbeddings` layer into a python 'no-keras-layer' class to avoid problems with
    weight restoring. It also makes sure that the layer is called from the correct scope to avoid problems with
    saving/storing the correct weights.
    """

    def __init__(self, layer, abs_scope_name=None):
        self._layer = layer
        self._abs_scope_name = abs_scope_name

    def call(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer.call(inputs, mode)

        # if an abs scope name is given to the embedding variable, call variable from absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer.call(inputs, mode)

    def __call__(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer(inputs, mode)

        # if an abs scope name is given to the embedding variable, call variable from absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer(inputs, mode)