# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF general model utils."""
Julien Plu's avatar
Julien Plu committed
17

18
import functools
Julien Plu's avatar
Julien Plu committed
19
import inspect
thomwolf's avatar
thomwolf committed
20
import os
21
import pickle
22
import re
Julien Plu's avatar
Julien Plu committed
23
import warnings
Sylvain Gugger's avatar
Sylvain Gugger committed
24
from typing import Dict, List, Optional, Union
thomwolf's avatar
thomwolf committed
25

Aymeric Augustin's avatar
Aymeric Augustin committed
26
import h5py
Julien Chaumond's avatar
Julien Chaumond committed
27
import numpy as np
thomwolf's avatar
thomwolf committed
28
import tensorflow as tf
Julien Plu's avatar
Julien Plu committed
29
from tensorflow.python.keras import backend as K
Matt's avatar
Matt committed
30
from tensorflow.python.keras.engine import data_adapter
31
from tensorflow.python.keras.engine.keras_tensor import KerasTensor
thomwolf's avatar
thomwolf committed
32
from tensorflow.python.keras.saving import hdf5_format
thomwolf's avatar
thomwolf committed
33

34
from huggingface_hub import Repository, list_repo_files
35
from requests import HTTPError
36

37
from .activations_tf import get_tf_activation
thomwolf's avatar
thomwolf committed
38
from .configuration_utils import PretrainedConfig
39
from .dynamic_module_utils import custom_object_save
Julien Plu's avatar
Julien Plu committed
40
41
42
43
from .file_utils import (
    DUMMY_INPUTS,
    TF2_WEIGHTS_NAME,
    WEIGHTS_NAME,
44
    EntryNotFoundError,
Julien Plu's avatar
Julien Plu committed
45
    ModelOutput,
Sylvain Gugger's avatar
Sylvain Gugger committed
46
    PushToHubMixin,
47
48
    RepositoryNotFoundError,
    RevisionNotFoundError,
Julien Plu's avatar
Julien Plu committed
49
    cached_path,
50
    copy_func,
51
    has_file,
Julien Plu's avatar
Julien Plu committed
52
    hf_bucket_url,
53
    is_offline_mode,
Julien Plu's avatar
Julien Plu committed
54
55
    is_remote_url,
)
56
from .generation_tf_utils import TFGenerationMixin
57
from .modeling_tf_outputs import TFSeq2SeqLMOutput
58
from .tf_utils import shape_list
Julien Plu's avatar
Julien Plu committed
59
from .tokenization_utils_base import BatchEncoding
Lysandre Debut's avatar
Lysandre Debut committed
60
from .utils import logging
thomwolf's avatar
thomwolf committed
61

Aymeric Augustin's avatar
Aymeric Augustin committed
62

Lysandre Debut's avatar
Lysandre Debut committed
63
logger = logging.get_logger(__name__)
64
tf_logger = tf.get_logger()
thomwolf's avatar
thomwolf committed
65

Julien Plu's avatar
Julien Plu committed
66
TFModelInputType = Union[
67
68
69
70
71
72
73
74
75
    List[tf.Tensor],
    List[np.ndarray],
    List[KerasTensor],
    Dict[str, tf.Tensor],
    Dict[str, np.ndarray],
    Dict[str, KerasTensor],
    tf.Tensor,
    np.ndarray,
    KerasTensor,
Julien Plu's avatar
Julien Plu committed
76
77
]

78

Matt's avatar
Matt committed
79
80
81
82
def dummy_loss(y_true, y_pred):
    return tf.reduce_mean(y_pred)


class TFModelUtilsMixin:
    """
    A few utilities for `tf.keras.Model`, to be used as a mixin.
    """

    def num_parameters(self, only_trainable: bool = False) -> int:
        """
Sylvain Gugger's avatar
Sylvain Gugger committed
90
91
92
        Get the number of (optionally, trainable) parameters in the model.

        Args:
93
            only_trainable (`bool`, *optional*, defaults to `False`):
Sylvain Gugger's avatar
Sylvain Gugger committed
94
95
96
                Whether or not to return only the number of trainable parameters

        Returns:
97
            `int`: The number of parameters.
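
        Example (an illustrative sketch; the checkpoint name is a stand-in):

        ```python
        >>> from transformers import TFBertModel

        >>> model = TFBertModel.from_pretrained("bert-base-uncased")
        >>> num_trainable = model.num_parameters(only_trainable=True)
        ```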
        """
        if only_trainable:
            return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables))
        else:
            return self.count_params()


def keras_serializable(cls):
    """
    Decorate a Keras Layer class to support Keras serialization.

    This is done by:

    1. Adding a `config` dict to the Keras config dictionary in `get_config` (called by Keras at serialization time).
    2. Wrapping `__init__` to accept that `config` dict (passed by Keras at deserialization time) and convert it to a
       config object for the actual layer initializer.
    3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not
       need to be supplied in `custom_objects` in the call to `tf.keras.models.load_model`.

    Args:
        cls (a `tf.keras.layers.Layer` subclass):
            Typically a `TF.MainLayer` class in this project, which in general must accept a `config` argument to its
            initializer.

    Returns:
        The same class object, with modifications for Keras deserialization.
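
    Example (a minimal sketch; `TFMyMainLayer` and `MyConfig` are hypothetical names, not classes of this library):

    ```python
    >>> @keras_serializable
    ... class TFMyMainLayer(tf.keras.layers.Layer):
    ...     config_class = MyConfig
    ...
    ...     def __init__(self, config, **kwargs):
    ...         super().__init__(**kwargs)
    ...         self.config = config
    ```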
    """
    initializer = cls.__init__

    config_class = getattr(cls, "config_class", None)
    if config_class is None:
        raise AttributeError("Must set `config_class` to use @keras_serializable")

    @functools.wraps(initializer)
    def wrapped_init(self, *args, **kwargs):
        config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None)

        if isinstance(config, dict):
            config = config_class.from_dict(config)
            initializer(self, config, *args, **kwargs)
        elif isinstance(config, PretrainedConfig):
            if len(args) > 0:
                initializer(self, *args, **kwargs)
            else:
                initializer(self, config, *args, **kwargs)
        else:
            raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)")

        self._config = config
        self._kwargs = kwargs

    cls.__init__ = wrapped_init

    if not hasattr(cls, "get_config"):
        raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses")
    if hasattr(cls.get_config, "_is_default"):

        def get_config(self):
            cfg = super(cls, self).get_config()
            cfg["config"] = self._config.to_dict()
            cfg.update(self._kwargs)
            return cfg

        cls.get_config = get_config

    cls._keras_serializable = True
    if hasattr(tf.keras.utils, "register_keras_serializable"):
        cls = tf.keras.utils.register_keras_serializable()(cls)
    return cls


class TFCausalLanguageModelingLoss:
    """
    Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token.

    <Tip>

    Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.

    </Tip>
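
    Example of the masking behaviour (a minimal sketch):

    ```python
    >>> labels = tf.constant([5, 7, -100])
    >>> tf.not_equal(labels, -100)  # [True, True, False]: the last position is dropped from the loss
    ```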
    """

    def hf_compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        # make sure only labels that are not equal to -100 affect the loss
        active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
        reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
        labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
        return loss_fn(labels, reduced_logits)


class TFQuestionAnsweringLoss:
    """
    Loss function suitable for question answering.
    """

    def hf_compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        start_loss = loss_fn(labels["start_position"], logits[0])
        end_loss = loss_fn(labels["end_position"], logits[1])

        return (start_loss + end_loss) / 2.0


class TFTokenClassificationLoss:
    """
    Loss function suitable for token classification.

    <Tip>

    Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.

    </Tip>
    """

    def hf_compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        # make sure only labels that are not equal to -100
        # are taken into account in the loss computation
        if tf.math.reduce_any(labels == -1):
            tf.print("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.")
            active_loss = tf.reshape(labels, (-1,)) != -1
        else:
            active_loss = tf.reshape(labels, (-1,)) != -100
        reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
        labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)

        return loss_fn(labels, reduced_logits)


class TFSequenceClassificationLoss:
    """
    Loss function suitable for sequence classification.
    """

    def hf_compute_loss(self, labels, logits):
        # A single output column means a regression task (mean squared error);
        # anything else is treated as classification (sparse categorical cross-entropy)
        if len(shape_list(logits)) == 1 or shape_list(logits)[1] == 1:
            loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
        else:
            loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
                from_logits=True, reduction=tf.keras.losses.Reduction.NONE
            )

        return loss_fn(labels, logits)


class TFMultipleChoiceLoss:
    """Loss function suitable for multiple choice tasks."""

    def hf_compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        return loss_fn(labels, logits)


class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss):
    """
    Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens.

    <Tip>

    Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.

    </Tip>
    """


class TFNextSentencePredictionLoss:
    """
    Loss function suitable for next sentence prediction (NSP), that is, the task of predicting whether two sentences
    are consecutive.

    <Tip>

    Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.

    </Tip>
    """

    def hf_compute_loss(self, labels, logits):
        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
            from_logits=True, reduction=tf.keras.losses.Reduction.NONE
        )
        # make sure only labels that are not equal to -100
        # are taken into account in the loss computation
        next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
        next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss)
        next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss)

        return loss_fn(next_sentence_label, next_sentence_reduced_logits)


def booleans_processing(config, **kwargs):
    """
    Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or
    graph).

    Args:
        config ([`PretrainedConfig`]):
            The config of the running model.
        **kwargs:
            The boolean parameters

    Returns:
        A dictionary with the proper values for each boolean
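
    Example (an illustrative sketch; `config` is any [`PretrainedConfig`], eager execution assumed):

    ```python
    >>> flags = booleans_processing(
    ...     config, output_attentions=True, output_hidden_states=None, return_dict=None
    ... )
    >>> flags["output_attentions"]  # True, the explicit value wins over the config
    >>> flags["output_hidden_states"]  # falls back to config.output_hidden_states
    ```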
    """
    final_booleans = {}

    if tf.executing_eagerly():
        final_booleans["output_attentions"] = (
            kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
        )
        final_booleans["output_hidden_states"] = (
            kwargs["output_hidden_states"]
            if kwargs["output_hidden_states"] is not None
            else config.output_hidden_states
        )
        final_booleans["return_dict"] = (
            kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
        )

        if "use_cache" in kwargs:
            final_booleans["use_cache"] = (
                kwargs["use_cache"] if kwargs["use_cache"] is not None else getattr(config, "use_cache", None)
            )
    else:
        final_booleans["output_attentions"] = config.output_attentions
        final_booleans["output_hidden_states"] = config.output_hidden_states

        if kwargs.get("return_dict", None) not in (None, True):
            tf_logger.warning(
                "The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."
            )
        final_booleans["return_dict"] = True

        if "use_cache" in kwargs:
            final_booleans["use_cache"] = getattr(config, "use_cache", None)

    return final_booleans


def input_processing(func, config, input_ids, **kwargs):
    """
    Process the input of each TensorFlow model, including the booleans. In case of a list of symbolic inputs, each
    input has to be named according to the parameter name, i.e. `input_ids = tf.keras.Input(shape=(128,),
    dtype='int32', name="input_ids")`, otherwise the order of the tensors is not guaranteed during training.

    Args:
        func (`callable`):
            The callable function of the TensorFlow model.
        config ([`PretrainedConfig`]):
            The config of the running model.
        **kwargs:
            The inputs of the model.

    Returns:
        A dictionary mapping each parameter name of `func` to its processed input value.
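
    Example (an illustrative sketch; `model` stands for any TF model of this library):

    ```python
    >>> inputs = input_processing(
    ...     func=model.call,
    ...     config=model.config,
    ...     input_ids=tf.constant([[7, 6, 0]]),
    ...     attention_mask=tf.constant([[1, 1, 1]]),
    ...     kwargs_call={},
    ... )
    >>> sorted(inputs.keys())  # the parameter names of `model.call`, with the booleans resolved
    ```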
    """
    signature = dict(inspect.signature(func).parameters)
    signature.pop("kwargs", None)
    signature.pop("self", None)
    parameter_names = list(signature.keys())
    output = {}
    allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray, KerasTensor)

    if "inputs" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
            FutureWarning,
        )

        output["input_ids"] = kwargs["kwargs_call"].pop("inputs")

    if "decoder_cached_states" in kwargs["kwargs_call"]:
        warnings.warn(
            "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
            FutureWarning,
        )
        output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")

    if "past" in kwargs["kwargs_call"] and "past_key_values" in kwargs:
        warnings.warn(
            "The `past` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
            FutureWarning,
        )
        kwargs["past_key_values"] = kwargs["kwargs_call"].pop("past")
    elif "past_key_values" in kwargs["kwargs_call"] and "past" in kwargs:
        kwargs["past"] = kwargs["kwargs_call"].pop("past_key_values")

    if len(kwargs["kwargs_call"]) > 0:
        raise ValueError(
            f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}."
        )

    kwargs.pop("kwargs_call")

    for k, v in kwargs.items():
        if isinstance(v, allowed_types) or v is None:
            output[k] = v
        else:
            raise ValueError(f"Data of type {type(v)} is not allowed. Only {allowed_types} are accepted for {k}.")

    if isinstance(input_ids, (tuple, list)):
        for i, input in enumerate(input_ids):
            # EagerTensors don't allow to use the .name property so we check for a real Tensor
            if type(input) == tf.Tensor:
                # Tensor names always follow the pattern `name:id`, so we check only the
                # `name` part
                tensor_name = input.name.split(":")[0]

                if tensor_name in parameter_names:
                    output[tensor_name] = input
                else:
                    output[parameter_names[i]] = input
            elif isinstance(input, allowed_types) or input is None:
                output[parameter_names[i]] = input
            else:
                raise ValueError(
                    f"Data of type {type(input)} is not allowed. Only {allowed_types} are accepted for {parameter_names[i]}."
                )
    elif isinstance(input_ids, (dict, BatchEncoding)):
        if "inputs" in input_ids:
            warnings.warn(
                "The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
                FutureWarning,
            )

            output["input_ids"] = input_ids.pop("inputs")

        if "decoder_cached_states" in input_ids:
            warnings.warn(
                "The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
                FutureWarning,
            )
            output["past_key_values"] = input_ids.pop("decoder_cached_states")

        for k, v in dict(input_ids).items():
            if isinstance(v, allowed_types) or v is None:
                output[k] = v
            elif k not in parameter_names and "args" not in parameter_names:
                logger.warning(
                    f"The parameter {k} does not belong to the parameter list {parameter_names} and will be ignored."
                )
                continue
            else:
                raise ValueError(f"Data of type {type(v)} is not allowed. Only {allowed_types} are accepted for {k}.")
    else:
        if isinstance(input_ids, (tf.Tensor, KerasTensor)) or input_ids is None:
            output[parameter_names[0]] = input_ids
        else:
            raise ValueError(
                f"Data of type {type(input_ids)} is not allowed. Only {allowed_types} are accepted for {parameter_names[0]}."
            )

    for name in parameter_names:
        if name not in list(output.keys()) and name != "args":
            output[name] = kwargs.pop(name, signature[name].default)

    # When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
    # So to respect the proper output we have to add this exception
    if "args" in output:
        if output["args"] is not None and type(output["args"]) == tf.Tensor:
            tensor_name = output["args"].name.split(":")[0]
            output[tensor_name] = output["args"]
        else:
            # `args` in this case is always the first parameter, then `input_ids`
            output["input_ids"] = output["args"]

        del output["args"]

    if "kwargs" in output:
        del output["kwargs"]

    boolean_dict = {
        k: v
        for k, v in output.items()
        if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
    }

    output.update(
        booleans_processing(
            config=config,
            **boolean_dict,
        )
    )

    return output


def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
    """
    Detect missing and unexpected layers and load the TF weights according to their names and shapes.

    Args:
        model (`tf.keras.models.Model`):
            The model to load the weights into.
        resolved_archive_file (`str`):
            The location of the H5 file.
        ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
            Whether or not to ignore weights with shapes that don't match between the checkpoint and the model.

    Returns:
        Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the
        mismatched layers.
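
    Example (a minimal sketch; the H5 path is illustrative):

    ```python
    >>> missing, unexpected, mismatched = load_tf_weights(model, "./tf_model.h5")
    >>> if missing:
    ...     print("Layers that were newly initialized:", missing)
    ```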
    """
    missing_layers = []
    unexpected_layers = []
    mismatched_layers = []

    # Read the H5 file
    with h5py.File(resolved_archive_file, "r") as f:
        # Retrieve the name of each layer from the H5 file
        saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))

        # Find the missing layers from the high level list of layers
        missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)

        # Find the unexpected layers from the high level list of layers
        unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))
        saved_weight_names_set = set()
        symbolic_weights_names = set()
        weight_value_tuples = []

        # Compute missing and unexpected sub layers
        # Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
        for layer in model.layers:
            # if layer_name from the H5 file belongs to the layers from the instantiated model
            if layer.name in saved_h5_model_layers_name:
                # Get the H5 layer object from its name
                h5_layer_object = f[layer.name]
                # Get all the weights as a list from the layer object
                symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
                saved_weights = {}

                # Create a dict from the H5 saved model that looks like {"weight_name": weight_value}
                # And a set with only the names
                for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"):
                    # TF names always start with the model name so we ignore it
                    name = "/".join(weight_name.split("/")[1:])

                    if _prefix is not None:
                        name = _prefix + "/" + name

                    saved_weights[name] = np.asarray(h5_layer_object[weight_name])

                    # Add the updated name to the final list for computing missing/unexpected values
                    saved_weight_names_set.add(name)

                # Loop over each weight from the instantiated model and compare with the weights from the H5 file
                for symbolic_weight in symbolic_weights:
                    # TF names always start with the model name so we ignore it
                    if _prefix is not None:
                        delimiter = len(_prefix.split("/"))
                        symbolic_weight_name = "/".join(
                            symbolic_weight.name.split("/")[:delimiter]
                            + symbolic_weight.name.split("/")[delimiter + 1 :]
                        )
                    else:
                        symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:])

                    # Here we check if the current weight is among the weights from the H5 file
                    # If yes, get the weight_value of the corresponding weight from the H5 file
                    # If not, set the value to None
                    saved_weight_value = saved_weights.get(symbolic_weight_name, None)

                    # Add the updated name to the final list for computing missing/unexpected values
                    symbolic_weights_names.add(symbolic_weight_name)

                    # If the current weight is found
                    if saved_weight_value is not None:
                        # Check if the shape of the current weight and the one from the H5 file are different
                        if K.int_shape(symbolic_weight) != saved_weight_value.shape:
                            # If yes we reshape the weight from the H5 file to match the current weight
                            # If the two shapes are not compatible we raise an error
                            try:
                                array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
                            except ValueError as e:
                                if ignore_mismatched_sizes:
                                    mismatched_layers.append(
                                        (symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
                                    )
                                    continue
                                else:
                                    raise e
                        else:
                            array = saved_weight_value

                        # We create the tuple that will be loaded and add it to the final list
                        weight_value_tuples.append((symbolic_weight, array))

    # Load all the weights
    K.batch_set_value(weight_value_tuples)

    # Compute the missing and unexpected layers
    missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))
    unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))

    return missing_layers, unexpected_layers, mismatched_layers


def init_copy_embeddings(old_embeddings, new_num_tokens):
    r"""
    This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case
    new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be
    kept or not. Example:

        - if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4]

            -  mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1]
        - if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5]

            - mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4]
    """
    old_num_tokens, old_embedding_dim = shape_list(old_embeddings)
    size_diff = new_num_tokens - old_num_tokens

    # initialize new embeddings
    # Copy token embeddings from the previous ones
    if tf.math.greater(size_diff, 0):
        # If the new size is greater than the old one, we pad the current embeddings with -1 up to the new size, and
        # we build a mask to identify the padded values so that they can later be replaced by the values of the newly
        # created embeddings
        current_weights = tf.pad(
            old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1
        )
        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
        mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True)
        mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False)
    else:
        # if the new size is lower than the old one, we take the current embeddings until the new size
        current_weights = tf.slice(
            old_embeddings.value(),
            tf.convert_to_tensor([0, 0]),
            tf.convert_to_tensor([new_num_tokens, old_embedding_dim]),
        )
        mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True)

    return mask, current_weights


class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin):
    r"""
    Base class for all TF models.

    [`TFPreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading,
    downloading and saving models as well as a few methods common to all models to:

        - resize the input embeddings,
        - prune heads in the self-attention heads.

    Class attributes (overridden by derived classes):

        - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class
          for this model architecture.
        - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived
          classes of the same architecture adding modules on top of the base model.
        - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP
          models, `pixel_values` for vision models and `input_values` for speech models).
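
    Example (an illustrative sketch; `TFBertModel` is one concrete subclass):

    ```python
    >>> from transformers import TFBertModel

    >>> model = TFBertModel.from_pretrained("bert-base-uncased")
    ```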
    """
    config_class = None
    base_model_prefix = ""
    main_input_name = "input_ids"
    _auto_class = None

    # a list of re pattern of tensor names to ignore from the model when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_missing = None
    # a list of re pattern of tensor names to ignore from the weights when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_unexpected = None
    _requires_load_weight_prefix = False

    @property
    def dummy_inputs(self) -> Dict[str, tf.Tensor]:
        """
        Dummy inputs to build the network.

        Returns:
            `Dict[str, tf.Tensor]`: The dummy inputs.
        """
        return {
            "input_ids": tf.constant(DUMMY_INPUTS),
        }

    @property
    def framework(self) -> str:
        """
        :str: Identifies that this is a TensorFlow model.
        """
        return "tf"

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(*inputs, **kwargs)
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
                "`PretrainedConfig`. To create a model from a pretrained model use "
                f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # Save config and origin of the pretrained weights if given in model
        self.config = config
        self.name_or_path = config.name_or_path

    def get_config(self):
        return self.config.to_dict()

    @classmethod
    def from_config(cls, config, **kwargs):
        if isinstance(config, PretrainedConfig):
            return cls._from_config(config, **kwargs)
        return cls._from_config(cls.config_class.from_dict(config, **kwargs))

    @classmethod
    def _from_config(cls, config, **kwargs):
        """
        All context managers that the model should be initialized under go here.
        """
        return cls(config, **kwargs)

    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
                "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
            }
        ]
    )
    def serving(self, inputs):
        """
        Method used for serving the model.

        Args:
            inputs (`Dict[str, tf.Tensor]`):
                The input of the saved model as a dictionary of tensors.
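
        Example of exporting the model with this signature (a sketch; the output path is illustrative):

        ```python
        >>> model.save_pretrained("./saved_model", saved_model=True)
        ```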
        """
        output = self.call(inputs)

        return self.serving_output(output)

    def serving_output(self, output):
        """
        Prepare the output of the saved model. Each model must implement this function.

        Args:
            output ([`TFBaseModelOutput`]):
                The output returned by the model.
        """
        raise NotImplementedError

    def get_input_embeddings(self) -> tf.keras.layers.Layer:
        """
        Returns the model's input embeddings layer.

        Returns:
            `tf.Variable`: The embeddings layer mapping vocabulary to hidden states.
        """
        main_layer = getattr(self, self.base_model_prefix, self)

        if main_layer is not self:
            return main_layer.get_input_embeddings()
        else:
            raise NotImplementedError

    def _save_checkpoint(self, checkpoint_dir, epoch):
        if not os.path.isdir(checkpoint_dir):
            os.mkdir(checkpoint_dir)
        # We avoid tf.train.checkpoint or saving weights in TF format, even though that includes optimizer
        # state for us, because it requires special handling for objects like custom losses, which we use
        # internally and which users are likely to use too
        weights_path = os.path.join(checkpoint_dir, "weights.h5")
        self.save_weights(weights_path)
        extra_data = {"epoch": epoch, "optimizer_state": self.optimizer.get_weights()}
        extra_data_path = os.path.join(checkpoint_dir, "extra_data.pickle")
        with open(extra_data_path, "wb") as f:
            pickle.dump(extra_data, f)

    def load_repo_checkpoint(self, repo_path_or_name):
        """
        Loads a saved checkpoint (model weights and optimizer state) from a repo. Returns the current epoch count when
        the checkpoint was made.

        Args:
            repo_path_or_name (`str`):
                Can either be a repository name for your model in the Hub or a path to a local folder (in which case
                the repository will have the name of that local folder).

        Returns:
            `dict`: A dictionary of extra metadata from the checkpoint, most commonly an "epoch" count.
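
        Example (a sketch; the repo name and dataset are stand-ins):

        ```python
        >>> extra_data = model.load_repo_checkpoint("my-user/my-model")
        >>> model.fit(train_dataset, initial_epoch=extra_data["epoch"], epochs=10)
        ```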
        """
        if getattr(self, "optimizer", None) is None:
            raise RuntimeError(
                "Checkpoint loading failed as no optimizer is attached to the model. "
                "This is most likely caused by the model not being compiled."
            )
        if not os.path.isdir(repo_path_or_name):
            # If this isn't a local path, check that the remote repo exists and has a checkpoint in it
            repo_files = list_repo_files(repo_path_or_name)
            for file in ("checkpoint/weights.h5", "checkpoint/extra_data.pickle"):
                if file not in repo_files:
                    raise FileNotFoundError(f"Repo {repo_path_or_name} does not contain checkpoint file {file}!")
            if "/" not in repo_path_or_name:
                model_id = repo_path_or_name
                repo_path_or_name = self.get_full_repo_name(repo_path_or_name)
            else:
                model_id = repo_path_or_name.split("/")[-1]
            repo = Repository(model_id, clone_from=f"https://huggingface.co/{repo_path_or_name}")
            local_dir = repo.local_dir
        else:
            local_dir = repo_path_or_name

        # Now make sure the repo actually has a checkpoint in it.
        checkpoint_dir = os.path.join(local_dir, "checkpoint")
        weights_file = os.path.join(checkpoint_dir, "weights.h5")
        if not os.path.isfile(weights_file):
            raise FileNotFoundError(f"Could not find checkpoint file weights.h5 in repo {repo_path_or_name}!")
        extra_data_file = os.path.join(checkpoint_dir, "extra_data.pickle")
        if not os.path.isfile(extra_data_file):
            raise FileNotFoundError(f"Could not find checkpoint file extra_data.pickle in repo {repo_path_or_name}!")

        # Assuming the repo is real and we got a checkpoint, load the weights and the optimizer state into the model.
        # The optimizer state includes the iteration count, so learning rate schedules should resume as normal too.
        self.load_weights(weights_file)
        with open(extra_data_file, "rb") as f:
            extra_data = pickle.load(f)
        self.optimizer.set_weights(extra_data["optimizer_state"])

        # Finally, return the epoch number from the checkpoint. This isn't a property of the model, so we can't
        # set it directly, but the user can pass it to fit().
        return {"epoch": extra_data["epoch"]}

    def compile(
        self,
        optimizer="rmsprop",
        loss="passthrough",
        metrics=None,
        loss_weights=None,
        weighted_metrics=None,
        run_eagerly=None,
        steps_per_execution=None,
        **kwargs
    ):
        """
        This is a thin wrapper that sets the model's loss output head as the loss if the user does not specify a loss
        function themselves.
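
        Example (a minimal sketch relying on the internal loss; the dataset is a stand-in):

        ```python
        >>> model.compile(optimizer="adam")  # no `loss` passed, so the model's own loss head is used
        >>> model.fit(tf_dataset)  # the labels must then be keys of the input dict
        ```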
        """
        if loss == "passthrough":
            logger.warning(
                "No loss specified in compile() - the model's internal loss computation will be used as the "
                "loss. Don't panic - this is a common way to train TensorFlow models in Transformers! "
                "Please ensure your labels are passed as keys in the input dict so that they are "
                "accessible to the model during the forward pass. To disable this behaviour, please pass a "
                "loss argument, or explicitly pass loss=None if you do not want your model to compute a loss."
            )
            loss = {"loss": dummy_loss}
        super().compile(
            optimizer=optimizer,
            loss=loss,
            metrics=metrics,
            loss_weights=loss_weights,
            weighted_metrics=weighted_metrics,
            run_eagerly=run_eagerly,
            steps_per_execution=steps_per_execution,
            **kwargs,
        )

    def compute_loss(self, *args, **kwargs):
        if hasattr(tf.keras.Model, "compute_loss"):
            # This will be true in TF 2.8 or greater
            return super().compute_loss(*args, **kwargs)
        else:
            warnings.warn(
                "The old compute_loss method is deprecated as it conflicts with the Keras compute_loss "
                "method added in TF 2.8. If you want the original HF compute_loss, please call "
                "hf_compute_loss() instead. From TF versions >= 2.8, or Transformers versions >= 5, "
                "calling compute_loss() will get the Keras method instead.",
                FutureWarning,
            )
            return self.hf_compute_loss(*args, **kwargs)

    def train_step(self, data):
        """
        A modification of Keras's default `train_step` that cleans up the printed metrics when we use a dummy loss.
        """
        # These are the only transformations `Model.fit` applies to user-input
        # data when a `tf.data.Dataset` is provided.
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
        # These next two lines differ from the base method - they avoid issues when the labels are in
        # the input dict (and loss is computed internally)
        if y is None and "labels" in x:
            y = x["labels"]  # Stops confusion with metric computations
        elif y is None and "input_ids" in x:
            # Just make any kind of dummy array to make loss work
            y = tf.zeros(tf.shape(x["input_ids"])[0], dtype=tf.int64)
        # Run forward pass.
        with tf.GradientTape() as tape:
            y_pred = self(x, training=True)
            loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)
        # Run backwards pass.
        self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
        # When y_pred is a ModelOutput and y is a tf.Tensor the metrics update
        # should be done only with the relevant ModelOutput param that is
        # considered by the loss.
        if isinstance(y_pred, TFSeq2SeqLMOutput) and isinstance(y, tf.Tensor):
            y_pred = y_pred["logits"]
        self.compiled_metrics.update_state(y, y_pred, sample_weight)
        # Collect metrics to return
        return_metrics = {}
        for metric in self.metrics:
            result = metric.result()
            if isinstance(result, dict):
                return_metrics.update(result)
            else:
                return_metrics[metric.name] = result
        # These next two lines are also not in the base method - they correct the displayed metrics
        # when we're using a dummy loss, to avoid a bogus "loss_loss" value being shown.
        if "loss" in return_metrics and "loss_loss" in return_metrics:
            del return_metrics["loss_loss"]
        return return_metrics

    def test_step(self, data):
        """
        A modification of Keras's default test_step that cleans up the printed metrics when we use a dummy loss.
        """
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
        # These next two lines differ from the base method - they avoid issues when the labels are in
        # the input dict (and loss is computed internally)
        if y is None and "labels" in x:
            y = x["labels"]  # Stops confusion with metric computations
        elif y is None and "input_ids" in x:
            # Just make any kind of dummy array to make loss work
            y = tf.zeros(tf.shape(x["input_ids"])[0], dtype=tf.int64)
        y_pred = self(x, training=False)
        self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)
        # Updates stateful loss metrics.
        if isinstance(y_pred, TFSeq2SeqLMOutput) and isinstance(y, tf.Tensor):
            y_pred = y_pred["logits"]
        self.compiled_metrics.update_state(y, y_pred, sample_weight)
        # Collect metrics to return
        return_metrics = {}
        for metric in self.metrics:
            result = metric.result()
            if isinstance(result, dict):
                return_metrics.update(result)
            else:
                return_metrics[metric.name] = result
        # These next two lines are also not in the base method - they correct the displayed metrics
        # when we're using a dummy loss, to avoid a bogus "loss_loss" value being shown.
        if "loss" in return_metrics and "loss_loss" in return_metrics:
            del return_metrics["loss_loss"]
        return return_metrics

    def create_model_card(
        self,
        output_dir,
        model_name: str,
        language: Optional[str] = None,
        license: Optional[str] = None,
        tags: Optional[str] = None,
        finetuned_from: Optional[str] = None,
        tasks: Optional[str] = None,
        dataset_tags: Optional[Union[str, List[str]]] = None,
        dataset: Optional[Union[str, List[str]]] = None,
        dataset_args: Optional[Union[str, List[str]]] = None,
    ):
        # Avoids a circular import by doing this when necessary.
        from .modelcard import TrainingSummary

        training_summary = TrainingSummary.from_keras(
            self,
            keras_history=self.history,
            language=language,
            license=license,
            tags=tags,
            model_name=model_name,
            finetuned_from=finetuned_from,
            tasks=tasks,
            dataset_tags=dataset_tags,
            dataset=dataset,
            dataset_args=dataset_args,
        )
        model_card = training_summary.to_model_card()
        with open(os.path.join(output_dir, "README.md"), "w") as f:
            f.write(model_card)

    def set_input_embeddings(self, value):
        """
        Set model's input embeddings

        Args:
            value (`tf.Variable`):
                The new weights mapping hidden states to vocabulary.
        """
        main_layer = getattr(self, self.base_model_prefix)

        if main_layer is None:
            raise NotImplementedError("The model does not implement the base_model_prefix attribute.")

        try:
            main_layer.set_input_embeddings(value)
        except AttributeError:
            logger.info("Building the model")
            self(self.dummy_inputs)
            main_layer.set_input_embeddings(value)

    def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]:
        """
        Returns the model's output embeddings

        Returns:
            `tf.Variable`: The new weights mapping vocabulary to hidden states.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()

            try:
                return lm_head.get_output_embeddings()
            except AttributeError:
                logger.info("Building the model")
                self(self.dummy_inputs)

                return lm_head.get_output_embeddings()

        return None  # Overwrite for models with output embeddings

    def set_output_embeddings(self, value):
        """
        Set model's output embeddings

        Args:
            value (`tf.Variable`):
                The new weights mapping hidden states to vocabulary.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()
            try:
                lm_head.set_output_embeddings(value)
            except AttributeError:
                logger.info("Building the model")
                self(self.dummy_inputs)
                lm_head.set_output_embeddings(value)

    def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:
        """
        Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the
        embeddings.

        Return:
            `tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model.
        """
        warnings.warn(
            "The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning
        )
        return self.get_lm_head()

    def get_prefix_bias_name(self) -> Union[None, str]:
        """
        Get the concatenated _prefix name of the bias from the model name to the parent layer.

        Return:
            `str`: The _prefix name of the bias.
        """
        warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
        return None

    def get_bias(self) -> Union[None, Dict[str, tf.Variable]]:
        """
        Dict of bias attached to an LM head. The key represents the name of the bias attribute.

        Return:
            `tf.Variable`: The weights representing the bias, None if not an LM model.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()
            try:
                return lm_head.get_bias()
            except AttributeError:
                self(self.dummy_inputs)

                return lm_head.get_bias()
        return None

    def set_bias(self, value):
        """
        Set all the bias in the LM head.

        Args:
            value (`Dict[tf.Variable]`):
                All the new bias attached to an LM head.
        """
        if self.get_lm_head() is not None:
            lm_head = self.get_lm_head()
            try:
                lm_head.set_bias(value)
            except AttributeError:
                self(self.dummy_inputs)
                lm_head.set_bias(value)

    def get_lm_head(self) -> tf.keras.layers.Layer:
        """
        The LM Head layer. This method must be overwritten by all the models that have an LM head.

        Return:
            `tf.keras.layers.Layer`: The LM head layer if the model has one, None if not.
        """
        return None

    def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable:
        """
        Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`.

        Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.

        Arguments:
            new_num_tokens (`int`, *optional*):
                The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
                vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just
                returns a pointer to the input tokens `tf.Variable` module of the model without doing anything.

        Return:
            `tf.Variable`: Pointer to the input tokens Embeddings Module of the model.
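
        Example (an illustrative sketch; `bert-base-uncased` is only a stand-in checkpoint):

        ```python
        >>> from transformers import TFBertModel

        >>> model = TFBertModel.from_pretrained("bert-base-uncased")
        >>> # Grow the vocabulary by 8 newly initialized rows; `model.config.vocab_size` is updated in place.
        >>> embeddings = model.resize_token_embeddings(model.config.vocab_size + 8)
        ```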
        """
        if new_num_tokens is None or new_num_tokens == self.config.vocab_size:
            return self._get_word_embedding_weight(self.get_input_embeddings())

        model_embeds = self._resize_token_embeddings(new_num_tokens)

        # Update base model and current model config
        self.config.vocab_size = new_num_tokens

        return model_embeds

    def _get_word_embedding_weight(model, embedding_layer):
        # If the variable holds the weights themselves, return them
        if isinstance(embedding_layer, tf.Tensor):
            return embedding_layer
        # Otherwise, try to get them from the layer's attributes

        embeds = getattr(embedding_layer, "weight", None)
        if embeds is not None:
            return embeds

        embeds = getattr(embedding_layer, "decoder", None)
        if embeds is not None:
            return embeds

        # The reason why the attributes don't exist might be
        # because the model is not built, so retry getting
        # the argument after building the model
        model(model.dummy_inputs)

        embeds = getattr(embedding_layer, "weight", None)
        if embeds is not None:
            return embeds

        embeds = getattr(embedding_layer, "decoder", None)
        if embeds is not None:
            return embeds

        return None
    def _resize_token_embeddings(self, new_num_tokens):
        old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings())
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)

        # if word embeddings are not tied, make sure that lm head bias is resized as well
        if self.get_bias() is not None:
            old_lm_head_bias = self.get_bias()
            new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens)

            self.set_bias(new_lm_head_bias)

        # if word embeddings are not tied, make sure that lm head decoder is resized as well
        if self.get_output_embeddings() is not None:
            old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings())
            new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens)

            self.set_output_embeddings(new_lm_head_decoder)

        self.set_input_embeddings(new_embeddings)

        return self.get_input_embeddings()

    def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens):
        """
        Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end.
        Reducing the size will remove vectors from the end

        Args:
            old_lm_head_bias (`tf.Variable`):
                Old lm head bias to be resized.
            new_num_tokens (`int`, *optional*):
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or `None`, just returns None

        Return:
            `tf.Variable`: Pointer to the resized bias.
        """
        new_lm_head_bias = {}

        for attr, weight in old_lm_head_bias.items():
            first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)
            size_diff = new_num_tokens - old_num_tokens
            final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens]

            # initialize new bias
            if tf.math.greater(size_diff, 0):
                padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]
                current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1)
                num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
                mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy]
                bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True)
                bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False)
            else:
                slice_from = [0] if first_dim is None else [0, 0]
                current_bias = tf.slice(
                    weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape)
                )
                bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True)
            new_bias = self.add_weight(
                shape=final_shape,
                initializer="zeros",
                trainable=True,
                name=weight.name.split(":")[0],
            )
            init_bias = tf.where(bias_mask, current_bias, new_bias.value())
            new_bias.assign(init_bias)
            new_lm_head_bias[attr] = new_bias
        return new_lm_head_bias
    def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens):
        """
        Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end.
        Reducing the size will remove vectors from the end

        Args:
            old_lm_head_decoder (`tf.Variable`):
                Old lm head decoder to be resized.
            new_num_tokens (`int`, *optional*):
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or `None`, just returns None

        Return:
            `tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the input
            ones.
        """
        new_lm_head_decoder = old_lm_head_decoder
        is_input_output_equals = tf.reduce_any(
            self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder
        )
        if old_lm_head_decoder is not None and not is_input_output_equals:
            old_embedding_dim = shape_list(old_lm_head_decoder)[1]
            decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens)
            new_lm_head_decoder = self.add_weight(
                shape=(new_num_tokens, old_embedding_dim),
                initializer="zeros",
                trainable=True,
                name=old_lm_head_decoder.name.split(":")[0],
            )
            init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value())

            new_lm_head_decoder.assign(init_decoder)
        return new_lm_head_decoder
    def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:
        """
        Build a resized Embedding weights from a provided token Embedding weights. Increasing the size will add newly
        initialized vectors at the end. Reducing the size will remove vectors from the end

        Args:
            old_embeddings (`tf.Variable`):
                Old embeddings to be resized.
            new_num_tokens (`int`, *optional*):
                New number of tokens in the embedding matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or `None`, just returns a pointer to the input tokens
                `tf.Variable` module of the model without doing anything.
        Return:
            `tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if `new_num_tokens` is
            `None`
        """
        old_embedding_dim = shape_list(old_embeddings)[1]
        init_range = getattr(self.config, "initializer_range", 0.02)
        embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)
        new_embeddings = self.add_weight(
            name=old_embeddings.name.split(":")[0],
            shape=[new_num_tokens, old_embedding_dim],
            initializer=get_initializer(init_range),
            dtype=tf.float32,
        )
        init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())

        new_embeddings.assign(init_embeddings)

        return new_embeddings

    def prune_heads(self, heads_to_prune):
        """
        Prunes heads of the base model.
        Arguments:
            heads_to_prune (`Dict[int, List[int]]`):
                Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads
                to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on
                layer 1 and heads 2 and 3 on layer 2.
        """
        raise NotImplementedError

    def save_pretrained(self, save_directory, saved_model=False, version=1, push_to_hub=False, **kwargs):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
1329
        [`~TFPreTrainedModel.from_pretrained`] class method.
1330
1331

        Arguments:
1332
            save_directory (`str`):
1333
                Directory to which to save. Will be created if it doesn't exist.
1334
            saved_model (`bool`, *optional*, defaults to `False`):
Julien Plu's avatar
Julien Plu committed
1335
                If the model has to be saved in saved model format as well or not.
1336
            version (`int`, *optional*, defaults to 1):
Julien Plu's avatar
Julien Plu committed
1337
1338
1339
                The version of the saved model. A saved model needs to be versioned in order to be properly loaded by
                TensorFlow Serving as detailed in the official documentation
                https://www.tensorflow.org/tfx/serving/serving_basic
1340
            push_to_hub (`bool`, *optional*, defaults to `False`):
Sylvain Gugger's avatar
Sylvain Gugger committed
1341
                Whether or not to push your model to the Hugging Face model hub after saving it.
1342

1343
                <Tip warning={true}>
1344

Sylvain Gugger's avatar
Sylvain Gugger committed
1345
1346
1347
                Using `push_to_hub=True` will synchronize the repository you are pushing to with `save_directory`,
                which requires `save_directory` to be a local clone of the repo you are pushing to if it's an existing
                folder. Pass along `temp_dir=True` to use a temporary directory instead.
1348
1349

                </Tip>
1350

Sylvain Gugger's avatar
Sylvain Gugger committed
1351
            kwargs:
Sylvain Gugger's avatar
Sylvain Gugger committed
1352
                Additional key word arguments passed along to the [`~file_utils.PushToHubMixin.push_to_hub`] method.
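
        Example (a minimal sketch; the directory path is arbitrary):

        ```python
        >>> from transformers import TFBertModel

        >>> model = TFBertModel.from_pretrained("bert-base-uncased")
        >>> model.save_pretrained("./my_model_directory/")
        >>> # The directory now contains `config.json` and `tf_model.h5` and can be reloaded with `from_pretrained`.
        ```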
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo = self._create_or_get_repo(save_directory, **kwargs)

        os.makedirs(save_directory, exist_ok=True)

        if saved_model:
            saved_model_dir = os.path.join(save_directory, "saved_model", str(version))
            self.save(saved_model_dir, include_optimizer=False, signatures=self.serving)
            logger.info(f"Saved model created in {saved_model_dir}")

        # Save configuration file
        self.config.architectures = [self.__class__.__name__[2:]]

        # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
        # loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=self.config)

        self.config.save_pretrained(save_directory)

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME)
        self.save_weights(output_model_file)
        logger.info(f"Model weights saved in {output_model_file}")

        if push_to_hub:
            url = self._push_to_hub(repo, commit_message=commit_message)
            logger.info(f"Model pushed to the hub in this commit: {url}")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""
        Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.

        The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.

        The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        Parameters:
            pretrained_model_name_or_path (`str`, *optional*):
                Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
                      user or organization name, like `dbmdz/bert-base-german-cased`.
                    - A path to a *directory* containing model weights saved using
                      [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *PyTorch state_dict save file* (e.g., `./pt_model/pytorch_model.bin`). In this
                      case, `from_pt` should be set to `True` and a configuration object should be provided as `config`
                      argument. This loading path is slower than converting the PyTorch model in a TensorFlow model
                      using the provided conversion scripts and loading the TensorFlow model afterwards.
                    - `None` if you are both providing the configuration and state dictionary (resp. with keyword
                      arguments `config` and `state_dict`).
            model_args (sequence of positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.
            config (`Union[PretrainedConfig, str]`, *optional*):
                Can be either:

                    - an instance of a class derived from [`PretrainedConfig`],
                    - a string valid as input to [`~PretrainedConfig.from_pretrained`].

                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the *model id* string of a pretrained
                      model).
                    - The model was saved using [`~TFPreTrainedModel.save_pretrained`] and is reloaded by supplying the
                      save directory.
                    - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                      configuration JSON file named *config.json* is found in the directory.
            from_pt (`bool`, *optional*, defaults to `False`):
                Load the model weights from a PyTorch state_dict save file (see docstring of
                `pretrained_model_name_or_path` argument).
            ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
                Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
                as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
                checkpoint with 3 labels).
            cache_dir (`str`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (e.g., not try downloading the model).
            use_auth_token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
                when running `transformers-cli login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            mirror (`str`, *optional*):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
                Please refer to the mirror site for more information.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it has been loaded) and initialize the model
                (e.g., `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
                automatically loaded:

                    - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
                      underlying model's `__init__` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, `kwargs` will be first passed to the configuration class
                      initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
                      corresponds to a configuration attribute will be used to override said attribute with the
                      supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
                      will be passed to the underlying model's `__init__` function.

        <Tip>

        Passing `use_auth_token=True` is required when you want to use a private model.

        </Tip>

        Examples:

        ```python
        >>> from transformers import BertConfig, TFBertModel

        >>> # Download model and configuration from huggingface.co and cache.
        >>> model = TFBertModel.from_pretrained("bert-base-uncased")
        >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
        >>> model = TFBertModel.from_pretrained("./test/saved_model/")
        >>> # Update configuration during loading.
        >>> model = TFBertModel.from_pretrained("bert-base-uncased", output_attentions=True)
        >>> assert model.config.output_attentions == True
        >>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).
        >>> config = BertConfig.from_json_file("./pt_model/my_pt_model_config.json")
        >>> model = TFBertModel.from_pretrained("./pt_model/my_pytorch_model.bin", from_pt=True, config=config)
        ```"""
        config = kwargs.pop("config", None)
        cache_dir = kwargs.pop("cache_dir", None)
        from_pt = kwargs.pop("from_pt", False)
        ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        output_loading_info = kwargs.pop("output_loading_info", False)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        mirror = kwargs.pop("mirror", None)
        load_weight_prefix = kwargs.pop("load_weight_prefix", None)
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)

        user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                _from_auto=from_auto_class,
                _from_pipeline=from_pipeline,
                **kwargs,
            )
        else:
            model_kwargs = kwargs

        # Load model
        if pretrained_model_name_or_path is not None:
            if os.path.isdir(pretrained_model_name_or_path):
                if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                    # Load from a PyTorch checkpoint in priority if from_pt
                    archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
                    # Load from a TF 2.0 checkpoint
                    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
                # At this stage we don't have a weight file so we will raise an error.
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                    raise EnvironmentError(
                        f"Error no file named {TF2_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} "
                        "but there is a file for PyTorch weights. Use `from_pt=True` to load this model from those "
                        "weights."
                    )
                else:
                    raise EnvironmentError(
                        f"Error no file named {TF2_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory "
                        f"{pretrained_model_name_or_path}."
                    )
            elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
                archive_file = pretrained_model_name_or_path
            elif os.path.isfile(pretrained_model_name_or_path + ".index"):
                archive_file = pretrained_model_name_or_path + ".index"
            else:
                filename = WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME
                archive_file = hf_bucket_url(
                    pretrained_model_name_or_path,
                    filename=filename,
                    revision=revision,
                    mirror=mirror,
                )

            try:
                # Load from URL or cache if already cached
                resolved_archive_file = cached_path(
                    archive_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                )

            except RepositoryNotFoundError:
                raise EnvironmentError(
                    f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                    "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                    "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                    "login` and pass `use_auth_token=True`."
                )
            except RevisionNotFoundError:
                raise EnvironmentError(
                    f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                    "this model name. Check the model page at "
                    f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
                )
            except EntryNotFoundError:
                if filename == TF2_WEIGHTS_NAME:
                    has_file_kwargs = {
                        "revision": revision,
                        "mirror": mirror,
                        "proxies": proxies,
                        "use_auth_token": use_auth_token,
                    }
                    if has_file(pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs):
                        raise EnvironmentError(
                            f"{pretrained_model_name_or_path} does not appear to have a file named {TF2_WEIGHTS_NAME} "
                            "but there is a file for PyTorch weights. Use `from_pt=True` to load this model from "
                            "those weights."
                        )
                    else:
                        raise EnvironmentError(
                            f"{pretrained_model_name_or_path} does not appear to have a file named {TF2_WEIGHTS_NAME} "
                            f"or {WEIGHTS_NAME}."
                        )
                else:
                    raise EnvironmentError(
                        f"{pretrained_model_name_or_path} does not appear to have a file named {filename}."
                    )
            except HTTPError:
                raise EnvironmentError(
                    "We couldn't connect to 'https://huggingface.co/' to load this model and it looks like "
                    f"{pretrained_model_name_or_path} is not the path to a directory containing a file named "
                    f"{TF2_WEIGHTS_NAME} or {WEIGHTS_NAME}.\n"
                    "Check your internet connection or see how to run the library in offline mode at "
                    "'https://huggingface.co/docs/transformers/installation#offline-mode'."
                )
            except EnvironmentError:
                raise EnvironmentError(
                    f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                    "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                    f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                    f"containing a file named {TF2_WEIGHTS_NAME} or {WEIGHTS_NAME}."
                )

            if resolved_archive_file == archive_file:
                logger.info(f"loading weights file {archive_file}")
            else:
                logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
        else:
            resolved_archive_file = None

        config.name_or_path = pretrained_model_name_or_path

        # composed models, *e.g.* TFRag, require special treatment when it comes to loading
        # pre-trained weights.
        if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None:
            model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name")

        # Instantiate model.
        model = cls(config, *model_args, **model_kwargs)

        if from_pt:
            from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model

            # Load from a PyTorch checkpoint
            return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)

        # we might need to extend the variable scope for composite models
        if load_weight_prefix is not None:
            with tf.compat.v1.variable_scope(load_weight_prefix):
                model(model.dummy_inputs)  # build the network with dummy inputs
        else:
            model(model.dummy_inputs)  # build the network with dummy inputs

        assert os.path.isfile(resolved_archive_file), f"Error retrieving file {resolved_archive_file}"

        # 'by_name' allows us to do transfer learning by skipping/adding layers
        # see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357
        try:
            missing_keys, unexpected_keys, mismatched_keys = load_tf_weights(
                model,
                resolved_archive_file,
                ignore_mismatched_sizes=ignore_mismatched_sizes,
                _prefix=load_weight_prefix,
            )
        except OSError as e:
            try:
                with open(resolved_archive_file) as f:
                    if f.read().startswith("version"):
                        raise OSError(
                            "You seem to have cloned a repository without having git-lfs installed. Please install "
                            "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
                            "you cloned."
                        )
                    else:
                        raise ValueError from e
            except (UnicodeDecodeError, ValueError):
                raise OSError(
                    "Unable to load weights from h5 file. "
                    "If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
                )

        model(model.dummy_inputs)  # Make sure restore ops are run

        if cls._keys_to_ignore_on_load_missing is not None:
            for pat in cls._keys_to_ignore_on_load_missing:
                missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

        if cls._keys_to_ignore_on_load_unexpected is not None:
            for pat in cls._keys_to_ignore_on_load_unexpected:
                unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

        if len(unexpected_keys) > 0:
            logger.warning(
                f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when "
                f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
                f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
                f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
                f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
                f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
            )
        else:
            logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")

        if len(missing_keys) > 0:
            logger.warning(
                f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized: {missing_keys}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        elif len(mismatched_keys) == 0:
            logger.warning(
                f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
                f"If your task is similar to the task the model of the checkpoint was trained on, "
                f"you can already use {model.__class__.__name__} for predictions without further training."
            )

        if len(mismatched_keys) > 0:
            mismatched_warning = "\n".join(
                [
                    f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
                    for key, shape1, shape2 in mismatched_keys
                ]
            )
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )

        if output_loading_info:
            loading_info = {
                "missing_keys": missing_keys,
                "unexpected_keys": unexpected_keys,
                "mismatched_keys": mismatched_keys,
            }

            return model, loading_info

        return model


# To update the docstring, we need to copy the method, otherwise we change the original docstring.
TFPreTrainedModel.push_to_hub = copy_func(TFPreTrainedModel.push_to_hub)
TFPreTrainedModel.push_to_hub.__doc__ = TFPreTrainedModel.push_to_hub.__doc__.format(
    object="model", object_class="TFAutoModel", object_files="model checkpoint"
)


class TFConv1D(tf.keras.layers.Layer):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (`int`):
            The number of output features.
        nx (`int`):
            The number of input features.
        initializer_range (`float`, *optional*, defaults to 0.02):
            The standard deviation to use to initialize the weights.
        kwargs:
            Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.
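
    Example (an illustrative sketch; the shapes are arbitrary):

    ```python
    >>> import tensorflow as tf

    >>> layer = TFConv1D(nf=256, nx=64, name="conv1d")
    >>> hidden_states = tf.random.normal((2, 5, 64))  # (batch, sequence, nx)
    >>> output = layer(hidden_states)  # (2, 5, 256): a linear projection with transposed weights
    ```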
    """

    def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
        super().__init__(**kwargs)
        self.nf = nf
        self.nx = nx
        self.initializer_range = initializer_range

    def build(self, input_shape):
        self.weight = self.add_weight(
            "weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
        )
        self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())

    def call(self, x):
        bz, sl = shape_list(x)[:2]

        x = tf.reshape(x, [-1, self.nx])
        x = tf.matmul(x, self.weight) + self.bias

        x = tf.reshape(x, [bz, sl, self.nf])

        return x


class TFSharedEmbeddings(tf.keras.layers.Layer):
    r"""
    Construct shared token embeddings.

    The weights of the embedding layer are usually shared with the weights of the linear decoder when doing language
    modeling.

    Args:
        vocab_size (`int`):
            The size of the vocabulary, e.g., the number of unique tokens.
        hidden_size (`int`):
            The size of the embedding vectors.
        initializer_range (`float`, *optional*):
            The standard deviation to use when initializing the weights. If no value is provided, it will default to
            \\(1/\sqrt{hidden\_size}\\).
        kwargs:
            Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.
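
    Example (an illustrative sketch; the sizes are arbitrary):

    ```python
    >>> import tensorflow as tf

    >>> embeddings = TFSharedEmbeddings(vocab_size=100, hidden_size=32, name="shared")
    >>> input_ids = tf.constant([[7, 2, 51]])
    >>> hidden = embeddings(input_ids, mode="embedding")  # shape (1, 3, 32)
    >>> logits = embeddings(hidden, mode="linear")  # shape (1, 3, 100), reusing the same weight matrix
    ```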
    """

    def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.initializer_range = hidden_size**-0.5 if initializer_range is None else initializer_range

    def build(self, input_shape):
        """
        Build shared token embedding layer. Shared weights logic adapted from
        https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
        """
        self.weight = self.add_weight(
            "weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)
        )
        super().build(input_shape)

    def get_config(self):
        config = {
            "vocab_size": self.vocab_size,
            "hidden_size": self.hidden_size,
            "initializer_range": self.initializer_range,
        }
        base_config = super().get_config()

        return dict(list(base_config.items()) + list(config.items()))

    def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor:
        """
        Get token embeddings of inputs or decode final hidden state.

        Args:
            inputs (`tf.Tensor`):
                In embedding mode, should be an int64 tensor with shape `[batch_size, length]`.

                In linear mode, should be a float tensor with shape `[batch_size, length, hidden_size]`.
            mode (`str`, defaults to `"embedding"`):
               A valid value is either `"embedding"` or `"linear"`, the first one indicates that the layer should be
               used as an embedding layer, the second one that the layer should be used as a linear decoder.

        Returns:
            `tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape `[batch_size, length,
            embedding_size]`.

            In linear mode, the output is a float32 with shape `[batch_size, length, vocab_size]`.

        Raises:
            ValueError: if `mode` is not valid.

        Shared weights logic is adapted from
        [here](https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24).
        """
        if mode == "embedding":
            return self._embedding(inputs)
        elif mode == "linear":
            return self._linear(inputs)
        else:
            raise ValueError(f"mode {mode} is not valid.")
    def _embedding(self, input_ids):
        """Applies embedding based on inputs tensor."""
        return tf.gather(self.weight, input_ids)

    def _linear(self, inputs):
        """
        Computes logits by running inputs through a linear layer.

        Args:
            inputs: A float32 tensor with shape [..., hidden_size]

        Returns:
            float32 tensor with shape [..., vocab_size].
        """
        first_dims = shape_list(inputs)[:-1]
        x = tf.reshape(inputs, [-1, self.hidden_size])
        logits = tf.matmul(x, self.weight, transpose_b=True)

        return tf.reshape(logits, first_dims + [self.vocab_size])


class TFSequenceSummary(tf.keras.layers.Layer):
    """
    Compute a single vector summary of a sequence hidden states.

    Args:
        config ([`PretrainedConfig`]):
            The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
            config class of your model for the default values it uses):

            - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are:

                - `"last"` -- Take the last token hidden state (like XLNet)
                - `"first"` -- Take the first token hidden state (like Bert)
                - `"mean"` -- Take the mean of all tokens hidden states
                - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
                - `"attn"` -- Not implemented now, use multi-head attention

            - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.
            - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes
              (otherwise to `config.hidden_size`).
            - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output,
              another string or `None` will add no activation.
            - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
            - **summary_last_dropout** (`float`) -- Optional dropout probability after the projection and activation.

        initializer_range (`float`, defaults to 0.02): The standard deviation to use to initialize the weights.
        kwargs:
            Additional keyword arguments passed along to the `__init__` of `tf.keras.layers.Layer`.
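
    Example (a sketch; assumes a config exposing the relevant `summary_*` attributes, e.g. `GPT2Config`):

    ```python
    >>> import tensorflow as tf
    >>> from transformers import GPT2Config

    >>> summary = TFSequenceSummary(GPT2Config(), initializer_range=0.02, name="summary")
    >>> hidden_states = tf.random.normal((2, 5, 768))  # (batch, sequence, hidden)
    >>> pooled = summary(hidden_states)  # one vector per sequence, projected as the config dictates
    ```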
thomwolf's avatar
thomwolf committed
1934
    """

    def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs):
        super().__init__(**kwargs)

        self.summary_type = config.summary_type if hasattr(config, "summary_type") else "last"
        if self.summary_type == "attn":
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

        self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj
        if self.has_summary:
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = tf.keras.layers.Dense(
                num_classes, kernel_initializer=get_initializer(initializer_range), name="summary"
            )

        self.has_activation = False
        activation_string = getattr(config, "summary_activation", None)
        if activation_string is not None:
            self.has_activation = True
            self.activation = get_tf_activation(activation_string)

        self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0
        if self.has_first_dropout:
            self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout)

        self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0
        if self.has_last_dropout:
            self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout)

    def call(self, inputs, cls_index=None, training=False):
        if not isinstance(inputs, (dict, tuple, list)):
            hidden_states = inputs
        elif isinstance(inputs, (tuple, list)):
            hidden_states = inputs[0]
            cls_index = inputs[1] if len(inputs) > 1 else None
            assert len(inputs) <= 2, "Too many inputs."
        else:
1978
            hidden_states = inputs.get("hidden_states")
1979
            cls_index = inputs.get("cls_index", None)
thomwolf's avatar
thomwolf committed
1980

1981
        if self.summary_type == "last":
thomwolf's avatar
thomwolf committed
1982
            output = hidden_states[:, -1]
1983
        elif self.summary_type == "first":
thomwolf's avatar
thomwolf committed
1984
            output = hidden_states[:, 0]
1985
        elif self.summary_type == "mean":
Lysandre's avatar
Lysandre committed
1986
            output = tf.reduce_mean(hidden_states, axis=1)
1987
        elif self.summary_type == "cls_index":
1988
            hidden_shape = shape_list(hidden_states)  # e.g. [batch, num choices, seq length, hidden dims]
            if cls_index is None:
                cls_index = tf.fill(
                    hidden_shape[:-2], hidden_shape[-2] - 1
                )  # a tensor of shape [batch] or [batch, num choices] filled with the last token position
            cls_shape = shape_list(cls_index)
            if len(cls_shape) <= len(hidden_shape) - 2:
                cls_index = tf.expand_dims(cls_index, axis=-1)
            # shape of cls_index: (bsz, XX, 1) where XX are optional leading dims of hidden_states
            output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
            output = tf.squeeze(
                output, axis=len(hidden_shape) - 2
            )  # shape of output: (batch, num choices, hidden_size)
        elif self.summary_type == "attn":
            raise NotImplementedError

        if self.has_first_dropout:
            output = self.first_dropout(output, training=training)

        if self.has_summary:
            output = self.summary(output)

        if self.has_activation:
            output = self.activation(output)

        if self.has_last_dropout:
            output = self.last_dropout(output, training=training)

        return output

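    # Usage sketch (illustrative; `TFSequenceSummary` is assumed to be the enclosing
    # layer in this file, and `config` to carry the `summary_*` attributes shown above):
    #
    #     summary = TFSequenceSummary(config, initializer_range=0.02)
    #     hidden_states = tf.random.normal((2, 7, config.hidden_size))  # (batch, seq_len, hidden)
    #     cls_index = tf.constant([6, 3])  # per-example token position to pool
    #     pooled = summary(hidden_states, cls_index=cls_index)  # (2, num_classes)
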
    @classmethod
    def register_for_auto_class(cls, auto_class="TFAutoModel"):
        """
        Register this class with a given auto class. This should only be used for custom models as the ones in the
        library are already mapped with an auto class.

        <Tip warning={true}>

        This API is experimental and may have some slight breaking changes in the next releases.

        </Tip>

        Args:
            auto_class (`str` or `type`, *optional*, defaults to `"TFAutoModel"`):
                The auto class to register this new model with.
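
        Example:

        ```python
        # Illustrative: `TFMyCustomModel` is a hypothetical custom model class.
        TFMyCustomModel.register_for_auto_class("TFAutoModel")
        ```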
        """
        if not isinstance(auto_class, str):
            auto_class = auto_class.__name__

        import transformers.models.auto as auto_module

        if not hasattr(auto_module, auto_class):
            raise ValueError(f"{auto_class} is not a valid auto class.")

        cls._auto_class = auto_class


def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:
    """
    Creates a `tf.initializers.TruncatedNormal` with the given range.

    Args:
        initializer_range (`float`, defaults to 0.02): Standard deviation of the truncated normal distribution.

    Returns:
        `tf.initializers.TruncatedNormal`: The truncated normal initializer.
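
    Example (illustrative usage):

    ```python
    # Build a Dense layer whose kernel is drawn from this truncated normal.
    dense = tf.keras.layers.Dense(768, kernel_initializer=get_initializer(0.02))
    ```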
    """
    return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)


class TFWrappedEmbeddings:
    """
    This class wraps the `TFSharedEmbeddings` layer in a plain Python (non-Keras-layer) class to avoid problems with
    weight restoring. It also makes sure that the layer is called from the correct name scope so that the right
    weights are saved and restored.
    """

    def __init__(self, layer, abs_scope_name=None):
        self._layer = layer
        self._abs_scope_name = abs_scope_name

    def call(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer.call(inputs, mode)

        # if an abs scope name is given to the embedding variable, call variable from absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer.call(inputs, mode)

    def __call__(self, inputs, mode="embedding"):
        if self._abs_scope_name is None:
            return self._layer(inputs, mode)

        # if an abs scope name is given to the embedding variable, call variable from absolute scope
        with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
            with tf.name_scope(abs_scope_name.original_name_scope):
                return self._layer(inputs, mode)
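

# Usage sketch (illustrative; mirrors how the TF seq2seq models in this library use
# the wrapper): pin a shared embedding's variables to an absolute name scope so that
# encoder and decoder resolve to the same weights. `input_ids` and `hidden_states`
# are assumed inputs, and `vocab_size`/`hidden_size` assumed config values.
#
#     shared = TFSharedEmbeddings(vocab_size, hidden_size, name="model.shared")
#     # Retrieve the absolute scope under which the shared weights will live.
#     with tf.compat.v1.variable_scope("model.shared") as shared_abs_scope_name:
#         pass
#     embed_tokens = TFWrappedEmbeddings(shared, abs_scope_name=shared_abs_scope_name)
#     token_embeds = embed_tokens(input_ids, mode="embedding")  # (batch, seq_len, hidden)
#     lm_logits = embed_tokens(hidden_states, mode="linear")    # (batch, seq_len, vocab)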