"sgl-kernel/csrc/vscode:/vscode.git/clone" did not exist on "66fb9b130751f62b746709f140f462ae1001f183"
modeling_utils.py 98.8 KB
Newer Older
1
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import os
import re
import warnings
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union

import torch
from torch import Tensor, device, nn
from torch.nn import CrossEntropyLoss

from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled
from .file_utils import (
    DUMMY_INPUTS,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    WEIGHTS_NAME,
    ModelOutput,
    PushToHubMixin,
    cached_path,
    hf_bucket_url,
    is_offline_mode,
    is_remote_url,
    replace_return_docstrings,
)
from .generation_utils import GenerationMixin
from .utils import logging


logger = logging.get_logger(__name__)


_init_weights = True


@contextmanager
def no_init_weights(_enable=True):
    """
    Context manager to globally disable weight initialization to speed up loading large models.

    TODO(Patrick): Delete safety argument `_enable=True` at next major version.
    """
    global _init_weights
    if _enable:
        _init_weights = False
    try:
        yield
    finally:
        _init_weights = True
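
# A minimal usage sketch (``MyModel`` is a hypothetical subclass of ``PreTrainedModel``), assuming
# the freshly constructed weights are overwritten by checkpoint weights right afterwards:
#
#     with no_init_weights():
#         model = MyModel(config)  # skips the expensive `_init_weights` pass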


try:
    from torch.nn import Identity
except ImportError:
    # Older PyTorch compatibility
    class Identity(nn.Module):
        r"""A placeholder identity operator that is argument-insensitive."""

        def __init__(self, *args, **kwargs):
            super().__init__()

        def forward(self, input):
            return input


def find_pruneable_heads_and_indices(
    heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
) -> Tuple[Set[int], torch.LongTensor]:
    """
    Finds the heads and their indices taking :obj:`already_pruned_heads` into account.

    Args:
        heads (:obj:`List[int]`): List of the indices of heads to prune.
        n_heads (:obj:`int`): The number of heads in the model.
        head_size (:obj:`int`): The size of each head.
        already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.

    Returns:
        :obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
    """
    mask = torch.ones(n_heads, head_size)
    heads = set(heads) - already_pruned_heads  # Convert to set and remove already pruned heads
    for head in heads:
        # Compute how many pruned heads are before the head and move the index accordingly
        head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
        mask[head] = 0
    mask = mask.view(-1).contiguous().eq(1)
    index: torch.LongTensor = torch.arange(len(mask))[mask].long()
    return heads, index
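
# A small worked example (hypothetical numbers): with 4 heads of size 2 and no previously pruned
# heads, pruning heads 0 and 2 keeps heads {1, 3}, whose flattened positions are [2, 3, 6, 7]:
#
#     heads, index = find_pruneable_heads_and_indices([0, 2], n_heads=4, head_size=2, already_pruned_heads=set())
#     # heads == {0, 2}; index == tensor([2, 3, 6, 7])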


def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
    try:
        return next(parameter.parameters()).device
    except StopIteration:
        # For nn.DataParallel compatibility in PyTorch 1.5

        def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
            tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
            return tuples

        gen = parameter._named_members(get_members_fn=find_tensor_attributes)
        first_tuple = next(gen)
        return first_tuple[1].device


def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
    try:
        return next(parameter.parameters()).dtype
    except StopIteration:
        # For nn.DataParallel compatibility in PyTorch 1.5

        def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
            tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
            return tuples

        gen = parameter._named_members(get_members_fn=find_tensor_attributes)
        first_tuple = next(gen)
        return first_tuple[1].dtype


class ModuleUtilsMixin:
    """
    A few utilities for :obj:`torch.nn.Modules`, to be used as a mixin.
    """

    @staticmethod
    def _hook_rss_memory_pre_forward(module, *args, **kwargs):
        try:
            import psutil
        except ImportError:
            raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")

        process = psutil.Process(os.getpid())
        mem = process.memory_info()
        module.mem_rss_pre_forward = mem.rss
        return None

    @staticmethod
    def _hook_rss_memory_post_forward(module, *args, **kwargs):
        try:
            import psutil
        except ImportError:
            raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")

        process = psutil.Process(os.getpid())
        mem = process.memory_info()
        module.mem_rss_post_forward = mem.rss
        mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
        module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
        return None

    def add_memory_hooks(self):
        """
        Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.

        Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to
        zero with :obj:`model.reset_memory_hooks_state()`.
        """
        for module in self.modules():
            module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
            module.register_forward_hook(self._hook_rss_memory_post_forward)
        self.reset_memory_hooks_state()

    def reset_memory_hooks_state(self):
        """
        Reset the :obj:`mem_rss_diff` attribute of each module (see
        :func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`).
        """
        for module in self.modules():
            module.mem_rss_diff = 0
            module.mem_rss_post_forward = 0
            module.mem_rss_pre_forward = 0
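
    # A usage sketch for the memory hooks (``model`` and ``inputs`` are hypothetical), assuming
    # psutil is installed:
    #
    #     model.add_memory_hooks()
    #     outputs = model(**inputs)
    #     print(model.mem_rss_diff)         # RSS increase (bytes) accumulated by the forward pass
    #     model.reset_memory_hooks_state()  # zero the counters before the next measurement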

    @property
    def device(self) -> device:
        """
        :obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same
        device).
        """
        return get_parameter_device(self)

    @property
    def dtype(self) -> torch.dtype:
        """
        :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
        """
        return get_parameter_dtype(self)

    def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
        """
        Invert an attention mask (e.g., switches 0. and 1.).

        Args:
            encoder_attention_mask (:obj:`torch.Tensor`): An attention mask.

        Returns:
            :obj:`torch.Tensor`: The inverted attention mask.
        """
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
        # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
        # /transformer/transformer_layers.py#L270
        # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
        # encoder_extended_attention_mask.transpose(-1, -2))
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility

        if self.dtype == torch.float16:
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
        elif self.dtype == torch.float32:
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
        else:
            raise ValueError(
                f"{self.dtype} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`"
            )

        return encoder_extended_attention_mask
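
    # A small worked example (hypothetical values): in fp32, a padding mask ``[[1, 1, 0]]`` becomes
    # an additive mask ``[[[[0., 0., -1e9]]]]`` of shape (batch, 1, 1, seq_len):
    #
    #     inverted = model.invert_attention_mask(torch.tensor([[1, 1, 0]]))  # ``model`` uses this mixin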

    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device (:obj:`torch.device`):
                The device of the input to the model.

        Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if self.config.is_decoder:
                batch_size, seq_length = input_shape
                seq_ids = torch.arange(seq_length, device=device)
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)

                if causal_mask.shape[1] < attention_mask.shape[1]:
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones(
                                (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
                            ),
                            causal_mask,
                        ],
                        axis=-1,
                    )

                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
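
    # A small worked example (hypothetical values) for the non-decoder path: a 2D padding mask
    # ``[[1, 1, 0]]`` is broadcast to shape (batch, 1, 1, seq_len) and turned into additive scores
    # ``[[[[0., 0., -10000.]]]]`` that are summed with the raw attention logits before the softmax:
    #
    #     extended = model.get_extended_attention_mask(torch.tensor([[1, 1, 0]]), (1, 3), model.device)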

    def get_head_mask(
        self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
    ) -> Tensor:
        """
        Prepare the head mask if needed.

        Args:
            head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`):
                The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
            num_hidden_layers (:obj:`int`):
                The number of hidden layers in the model.
            is_attention_chunked (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the attention scores are computed by chunks.

        Returns:
            :obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or
            list with :obj:`[None]` for each layer.
        """
        if head_mask is not None:
            head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
            if is_attention_chunked is True:
                head_mask = head_mask.unsqueeze(-1)
        else:
            head_mask = [None] * num_hidden_layers

        return head_mask

    def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
        """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
        if head_mask.dim() == 1:
            head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
        elif head_mask.dim() == 2:
            head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
        assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
        head_mask = head_mask.to(dtype=self.dtype)  # switch to float if need + fp16 compatibility
        return head_mask
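
    # A shape sketch (hypothetical values): a 1D ``head_mask`` of shape ``[num_heads]`` is expanded
    # to ``[num_hidden_layers, 1, num_heads, 1, 1]`` so it broadcasts over batch and sequence dims:
    #
    #     head_mask = model.get_head_mask(torch.ones(12), num_hidden_layers=12)
    #     # head_mask.shape == torch.Size([12, 1, 12, 1, 1])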

    def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
        """
        Get number of (optionally, trainable or non-embeddings) parameters in the module.

        Args:
            only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return only the number of trainable parameters

            exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return only the number of non-embeddings parameters

        Returns:
            :obj:`int`: The number of parameters.
        """

        if exclude_embeddings:
            embedding_param_names = [
                f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding)
            ]
            non_embedding_parameters = [
                parameter for name, parameter in self.named_parameters() if name not in embedding_param_names
            ]
            return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable)
        else:
            return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable)
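
    # A usage sketch (``model`` is hypothetical): counting only trainable, non-embedding
    # parameters, e.g. to report a model size that excludes the embedding matrices:
    #
    #     n = model.num_parameters(only_trainable=True, exclude_embeddings=True)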

    def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int:
        """
        Helper function to estimate the total number of tokens from the model inputs.

        Args:
            input_dict (:obj:`dict`): The model inputs.

        Returns:
            :obj:`int`: The total number of tokens.
        """
        token_inputs = [tensor for key, tensor in input_dict.items() if "input" in key]
        if token_inputs:
            return sum([token_input.numel() for token_input in token_inputs])
        else:
            warnings.warn(
                "Could not estimate the number of tokens of the input, floating-point operations will not be computed"
            )
            return 0

    def floating_point_ops(
        self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True
    ) -> int:
        """
        Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a
        batch with this transformer model. Default approximation neglects the quadratic dependency on the number of
        tokens (valid if :obj:`12 * d_model << sequence_length`) as laid out in `this paper
        <https://arxiv.org/pdf/2001.08361.pdf>`__ section 2.1. Should be overridden for transformers with parameter
        re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.

        Args:
            input_dict (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The model inputs.

            exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to count embedding and softmax operations.

        Returns:
            :obj:`int`: The number of floating-point operations.
        """

        return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)
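
    # A worked example of the 6 * tokens * parameters approximation (hypothetical numbers): a batch
    # of 8 sequences of 128 tokens (1024 tokens) through a 110M-parameter model is estimated at
    # 6 * 1024 * 110e6 ≈ 6.8e11 floating-point operations for the forward and backward passes:
    #
    #     flops = model.floating_point_ops({"input_ids": torch.ones(8, 128, dtype=torch.long)})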


class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin):
    r"""
    Base class for all models.

    :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods
    for loading, downloading and saving models as well as a few methods common to all models to:

        * resize the input embeddings,
        * prune heads in the self-attention heads.

    Class attributes (overridden by derived classes):

        - **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
          :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
        - **load_tf_weights** (:obj:`Callable`) -- A python `method` for loading a TensorFlow checkpoint in a PyTorch
          model, taking as arguments:

            - **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the
              TensorFlow checkpoint.
        - **config** (:class:`~transformers.PretrainedConfig`) -- An instance of the configuration associated to
              the model.
            - **path** (:obj:`str`) -- A path to the TensorFlow checkpoint.

        - **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
          derived classes of the same architecture adding modules on top of the base model.
        - **is_parallelizable** (:obj:`bool`) -- A flag indicating whether this model supports model parallelization.
    """
    config_class = None
    base_model_prefix = ""
    # a list of re pattern of tensor names to ignore from the model when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_missing = None
    # a list of re pattern of tensor names to ignore from the weights when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_unexpected = None
    # a list of tensor names to ignore when saving the model (useful for keys that aren't
    # trained, but which are deterministic)
    _keys_to_ignore_on_save = None

    is_parallelizable = False

    @property
    def dummy_inputs(self) -> Dict[str, torch.Tensor]:
        """
        :obj:`Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
        """
        return {"input_ids": torch.tensor(DUMMY_INPUTS)}

    def __init__(self, config: PretrainedConfig, *inputs, **kwargs):
        super().__init__()
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
                "`PretrainedConfig`. To create a model from a pretrained model use "
                f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # Save config and origin of the pretrained weights if given in model
        self.config = config
        self.name_or_path = config.name_or_path

    @classmethod
    def _from_config(cls, config, **kwargs):
        """
        All context managers that the model should be initialized under go here.

        Args:
            torch_dtype (:obj:`torch.dtype`, `optional`):
                Override the default ``torch.dtype`` and load the model under this dtype.
        """
        torch_dtype = kwargs.pop("torch_dtype", None)

        # override default dtype if needed
        dtype_orig = None
        if torch_dtype is not None:
            dtype_orig = cls._set_default_torch_dtype(torch_dtype)

        if is_deepspeed_zero3_enabled():
            import deepspeed

            logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
            # this immediately partitions the model across all gpus, to avoid the overhead in time
            # and memory copying it on CPU or each GPU first
            with deepspeed.zero.Init(config=deepspeed_config()):
                model = cls(config, **kwargs)
        else:
            model = cls(config, **kwargs)

        # restore default dtype if it was modified
        if dtype_orig is not None:
            torch.set_default_dtype(dtype_orig)

        return model

    @classmethod
    def _set_default_torch_dtype(cls, dtype: torch.dtype) -> torch.dtype:
        """
        Change the default dtype and return the previous one. This is needed when wanting to instantiate the model
        under specific dtype.

        Args:
            dtype (:obj:`torch.dtype`):
                a floating dtype to set to.

        Returns:
            :obj:`torch.dtype`: the original ``dtype`` that can be used to restore ``torch.set_default_dtype(dtype)``
            if it was modified. If it wasn't, returns :obj:`None`.

        Note ``set_default_dtype`` currently only works with floating-point types and asserts if, for example,
        ``torch.int64`` is passed. So if a non-float ``dtype`` is passed, this function will throw an exception.
        """
        if not dtype.is_floating_point:
            raise ValueError(
                f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype"
            )

        logger.info(f"Instantiating {cls.__name__} model under default dtype {dtype}.")
        dtype_orig = torch.get_default_dtype()
        torch.set_default_dtype(dtype)
        return dtype_orig
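
    # A usage sketch of the dtype override performed in ``_from_config`` (a sketch of a private
    # helper, not public API): set the default dtype before instantiation, then restore it:
    #
    #     dtype_orig = cls._set_default_torch_dtype(torch.float16)
    #     model = cls(config)                  # parameters are created in fp16
    #     torch.set_default_dtype(dtype_orig)  # restore the previous default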

    @property
    def base_model(self) -> nn.Module:
        """
        :obj:`torch.nn.Module`: The main body of the model.
        """
        return getattr(self, self.base_model_prefix, self)

    def get_input_embeddings(self) -> nn.Module:
        """
        Returns the model's input embeddings.

        Returns:
            :obj:`nn.Module`: A torch module mapping vocabulary to hidden states.
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            return base_model.get_input_embeddings()
        else:
            raise NotImplementedError

    def set_input_embeddings(self, value: nn.Module):
        """
        Set model's input embeddings.

        Args:
            value (:obj:`nn.Module`): A module mapping vocabulary to hidden states.
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            base_model.set_input_embeddings(value)
        else:
            raise NotImplementedError

    def get_output_embeddings(self) -> nn.Module:
        """
        Returns the model's output embeddings.

        Returns:
            :obj:`nn.Module`: A torch module mapping hidden states to vocabulary.
        """
        return None  # Overwrite for models with output embeddings

    def _init_weights(self, module):
        """
        Initialize the weights. This method should be overridden by derived class.
        """
        raise NotImplementedError(f"Make sure `_init_weigths` is implemented for {self.__class__}")

    def tie_weights(self):
        """
        Tie the weights between the input embeddings and the output embeddings.

        If the :obj:`torchscript` flag is set in the configuration, TorchScript can't handle parameter sharing, so we
        clone the weights instead.
        """
        output_embeddings = self.get_output_embeddings()
        if output_embeddings is not None and self.config.tie_word_embeddings:
            self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())

        if self.config.is_encoder_decoder and self.config.tie_encoder_decoder:
            if hasattr(self, self.base_model_prefix):
                self = getattr(self, self.base_model_prefix)
            self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)

    @staticmethod
    def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str):
        uninitialized_encoder_weights: List[str] = []
        if decoder.__class__ != encoder.__class__:
            logger.info(
                f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized."
            )

        def tie_encoder_to_decoder_recursively(
            decoder_pointer: nn.Module,
            encoder_pointer: nn.Module,
            module_name: str,
            uninitialized_encoder_weights: List[str],
            depth=0,
        ):
            assert isinstance(decoder_pointer, nn.Module) and isinstance(
                encoder_pointer, nn.Module
            ), f"{decoder_pointer} and {encoder_pointer} have to be of type nn.Module"
            if hasattr(decoder_pointer, "weight"):
                assert hasattr(encoder_pointer, "weight")
                encoder_pointer.weight = decoder_pointer.weight
                if hasattr(decoder_pointer, "bias"):
                    assert hasattr(encoder_pointer, "bias")
                    encoder_pointer.bias = decoder_pointer.bias
                return

            encoder_modules = encoder_pointer._modules
            decoder_modules = decoder_pointer._modules
            if len(decoder_modules) > 0:
                assert (
                    len(encoder_modules) > 0
                ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"

                all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
                encoder_layer_pos = 0
                for name, module in decoder_modules.items():
                    if name.isdigit():
                        encoder_name = str(int(name) + encoder_layer_pos)
                        decoder_name = name
                        if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
                            encoder_modules
                        ) != len(decoder_modules):
                            # this can happen if the name corresponds to the position in a ModuleList of layers;
                            # in this case the decoder has added a cross-attention layer that the encoder does not
                            # have, so skip this step and subtract one layer position from the encoder
                            encoder_layer_pos -= 1
                            continue
                    elif name not in encoder_modules:
                        continue
                    elif depth > 500:
                        raise ValueError(
                            "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
                        )
                    else:
                        decoder_name = encoder_name = name
                    tie_encoder_to_decoder_recursively(
                        decoder_modules[decoder_name],
                        encoder_modules[encoder_name],
                        module_name + "/" + name,
                        uninitialized_encoder_weights,
                        depth=depth + 1,
                    )
                    all_encoder_weights.remove(module_name + "/" + encoder_name)

                uninitialized_encoder_weights += list(all_encoder_weights)

        # tie weights recursively
        tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights)
        if len(uninitialized_encoder_weights) > 0:
            logger.warning(
                f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
            )

    def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
        """Tie or clone module weights depending of whether we are using TorchScript or not"""
        if self.config.torchscript:
            output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
        else:
            output_embeddings.weight = input_embeddings.weight

        if getattr(output_embeddings, "bias", None) is not None:
            output_embeddings.bias.data = nn.functional.pad(
                output_embeddings.bias.data,
                (
                    0,
                    output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],
                ),
                "constant",
                0,
            )
        if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
            output_embeddings.out_features = input_embeddings.num_embeddings
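
    # After tying, the input and output embeddings share one Parameter object; a quick sanity check
    # on a hypothetical ``model`` whose config has ``tie_word_embeddings=True`` and ``torchscript=False``:
    #
    #     model.tie_weights()
    #     assert model.get_input_embeddings().weight is model.get_output_embeddings().weight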

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
        """
        Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.

        Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.

        Arguments:
            new_num_tokens (:obj:`int`, `optional`):
                The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
                vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
                just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model without doing
                anything.

        Return:
            :obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
        """
        model_embeds = self._resize_token_embeddings(new_num_tokens)
        if new_num_tokens is None:
            return model_embeds

        # Update base model and current model config
        self.config.vocab_size = new_num_tokens
        self.vocab_size = new_num_tokens

        # Tie weights again if needed
        self.tie_weights()

        return model_embeds
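
    # A typical usage sketch (``model`` and ``tokenizer`` are hypothetical): grow the vocabulary
    # after adding tokens to a tokenizer, so the embedding matrix matches the new vocab size:
    #
    #     tokenizer.add_tokens(["<new_token>"])
    #     model.resize_token_embeddings(len(tokenizer))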

    def _resize_token_embeddings(self, new_num_tokens):
        old_embeddings = self.get_input_embeddings()
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.set_input_embeddings(new_embeddings)

        # if word embeddings are not tied, make sure that lm head is resized as well
        if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings:
            old_lm_head = self.get_output_embeddings()
            new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens)
            self.set_output_embeddings(new_lm_head)

        return self.get_input_embeddings()

    def _get_resized_embeddings(
        self, old_embeddings: nn.Embedding, new_num_tokens: Optional[int] = None
    ) -> nn.Embedding:
        """
        Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
        initialized vectors at the end. Reducing the size will remove vectors from the end.

        Args:
            old_embeddings (:obj:`torch.nn.Embedding`):
                Old embeddings to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the embedding matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`torch.nn.Embedding` module of the model without doing anything.

        Return:
            :obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
            :obj:`new_num_tokens` is :obj:`None`
        """
        if new_num_tokens is None:
            return old_embeddings

        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None):
                old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
        else:
            old_num_tokens, old_embedding_dim = old_embeddings.weight.size()

        if old_num_tokens == new_num_tokens:
            return old_embeddings

        if not isinstance(old_embeddings, nn.Embedding):
            raise TypeError(
                f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}."
                f"You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Embedding}."
            )

        # Build new embeddings
        new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim).to(
            self.device, dtype=old_embeddings.weight.dtype
        )

        # initialize all new embeddings (in particular added tokens)
        self._init_weights(new_embeddings)

        # Copy token embeddings from the previous weights

        # numbers of tokens to copy
        n = min(old_num_tokens, new_num_tokens)
        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=0):
                if torch.distributed.get_rank() == 0:
                    new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
        else:
            new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]

        return new_embeddings

    def _get_resized_lm_head(
        self, old_lm_head: nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False
    ) -> nn.Linear:
        """
        Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized
        vectors at the end. Reducing the size will remove vectors from the end.

        Args:
            old_lm_head (:obj:`torch.nn.Linear`):
                Old lm head linear layer to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`torch.nn.Linear` module of the model without doing anything.
            transposed (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether ``old_lm_head`` is transposed or not. If :obj:`True`, ``old_lm_head.size()`` is
                ``lm_head_dim, vocab_size``, else ``vocab_size, lm_head_dim``.

        Return:
            :obj:`torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if
            :obj:`new_num_tokens` is :obj:`None`
        """
        if new_num_tokens is None:
            return old_lm_head

        old_num_tokens, old_lm_head_dim = (
            old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
        )

        if old_num_tokens == new_num_tokens:
            return old_lm_head

        if not isinstance(old_lm_head, nn.Linear):
            raise TypeError(
                f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}."
                f"You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Linear}."
            )

        # Build new lm head
        new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim)
        has_new_lm_head_bias = old_lm_head.bias is not None
        new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias).to(self.device)

        # initialize new lm head (in particular added tokens)
        self._init_weights(new_lm_head)

        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)

        # Copy old lm head weights to new lm head
        if not transposed:
            new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :]
        else:
            new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy]

        # Copy bias weights to new lm head
        if has_new_lm_head_bias:
            new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy]

        return new_lm_head

    def init_weights(self):
        """
        Prunes heads if needed and initializes the weights (unless weight initialization has been disabled).
        """
        # Prune heads if needed
        if self.config.pruned_heads:
            self.prune_heads(self.config.pruned_heads)

        if _init_weights:
            # Initialize weights
            self.apply(self._init_weights)

            # Tie weights should be skipped when not initializing all weights
            # since from_pretrained(...) calls tie weights anyways
            self.tie_weights()

    def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
        """
        Prunes heads of the base model.

        Arguments:
            heads_to_prune (:obj:`Dict[int, List[int]]`):
                Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
                heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
                0 and 2 on layer 1 and heads 2 and 3 on layer 2.
        """
        # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
        for layer, heads in heads_to_prune.items():
            union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
            self.config.pruned_heads[layer] = list(union_heads)  # Unfortunately we have to store it as list for JSON

        self.base_model._prune_heads(heads_to_prune)
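
    # A usage sketch mirroring the docstring example (``model`` is hypothetical): prune heads 0 and
    # 2 on layer 1 and heads 2 and 3 on layer 2; the pruned set is also recorded in ``model.config``:
    #
    #     model.prune_heads({1: [0, 2], 2: [2, 3]})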

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        save_config: bool = True,
        state_dict: Optional[dict] = None,
        save_function: Callable = torch.save,
        push_to_hub: bool = False,
        **kwargs,
    ):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        :func:`~transformers.PreTrainedModel.from_pretrained` class method.

        Arguments:
            save_directory (:obj:`str` or :obj:`os.PathLike`):
                Directory to which to save. Will be created if it doesn't exist.
            save_config (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to save the config of the model. Useful during distributed training (e.g., on TPUs)
                when this function needs to be called on all processes. In this case, set :obj:`save_config=True` only
                on the main process to avoid race conditions.
            state_dict (nested dictionary of :obj:`torch.Tensor`):
                The state dictionary of the model to save. Will default to :obj:`self.state_dict()`, but can be used to
                only save parts of the model or if special precautions need to be taken when recovering the state
                dictionary of a model (like when using model parallelism).
            save_function (:obj:`Callable`):
                The function to use to save the state dictionary. Useful during distributed training (e.g., on TPUs)
                when one needs to replace :obj:`torch.save` by another method.
            push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to push your model to the Hugging Face model hub after saving it.

                .. warning::

                    Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with
                    :obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are
                    pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory
                    instead.

            kwargs:
                Additional keyword arguments passed along to the
                :meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo = self._create_or_get_repo(save_directory, **kwargs)

        os.makedirs(save_directory, exist_ok=True)

        # Only save the model itself if we are using distributed training
        model_to_save = unwrap_model(self)

        # save the string version of dtype to the config, e.g. convert torch.float32 => "float32"
        # we currently don't use this setting automatically, but may start to use with v5
        dtype = get_parameter_dtype(model_to_save)
        model_to_save.config.torch_dtype = str(dtype).split(".")[1]

        # Attach architecture to the config
        model_to_save.config.architectures = [model_to_save.__class__.__name__]

        # Save the config
        if save_config:
            model_to_save.config.save_pretrained(save_directory)

        # Save the model
        if state_dict is None:
            state_dict = model_to_save.state_dict()

        # Handle the case where some state_dict keys shouldn't be saved
        if self._keys_to_ignore_on_save is not None:
            state_dict = {k: v for k, v in state_dict.items() if k not in self._keys_to_ignore_on_save}

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
        save_function(state_dict, output_model_file)

        logger.info(f"Model weights saved in {output_model_file}")

        if push_to_hub:
            url = self._push_to_hub(repo, commit_message=commit_message)
            logger.info(f"Model pushed to the hub in this commit: {url}")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
        r"""
        Instantiate a pretrained pytorch model from a pre-trained model configuration.

        The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated). To
        train the model, you should first set it back in training mode with ``model.train()``.

        The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.

        The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        Parameters:
            pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`, `optional`):
                Can be either:

                    - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
                      a user or organization name, like ``dbmdz/bert-base-german-cased``.
                    - A path to a `directory` containing model weights saved using
                      :func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
                    - A path or url to a `tensorflow index checkpoint file` (e.g., ``./tf_model/model.ckpt.index``). In
                      this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
                      as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
                      a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
                    - A path or url to a model folder containing a `flax checkpoint file` in `.msgpack` format (e.g.,
                      ``./flax_model/`` containing ``flax_model.msgpack``). In this case, ``from_flax`` should be set
                      to :obj:`True`.
                    - :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
                      arguments ``config`` and ``state_dict``).
            model_args (sequence of positional arguments, `optional`):
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
            config (:obj:`Union[PretrainedConfig, str, os.PathLike]`, `optional`):
                Can be either:

                    - an instance of a class derived from :class:`~transformers.PretrainedConfig`,
                    - a string or path valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.

                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the `model id` string of a pretrained
                      model).
                    - The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
                      by supplying the save directory.
                    - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
                      configuration JSON file named `config.json` is found in the directory.
            state_dict (:obj:`Dict[str, torch.Tensor]`, `optional`):
                A state dictionary to use instead of a state dictionary loaded from saved weights file.

                This option can be used if you want to create a model from a pretrained configuration but load your own
                weights. In this case though, you should check if using
                :func:`~transformers.PreTrainedModel.save_pretrained` and
                :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
            cache_dir (:obj:`Union[str, os.PathLike]`, `optional`):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Load the model weights from a TensorFlow checkpoint save file (see docstring of
                ``pretrained_model_name_or_path`` argument).
            from_flax (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Load the model weights from a Flax checkpoint save file (see docstring of
                ``pretrained_model_name_or_path`` argument).
            force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
1049
            output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
Sylvain Gugger's avatar
Sylvain Gugger committed
1050
                Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.
1051
            local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
Stas Bekman's avatar
Stas Bekman committed
1052
                Whether or not to only look at local files (i.e., do not try to download the model).
1053
1054
1055
            use_auth_token (:obj:`str` or `bool`, `optional`):
                The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
                generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
Julien Chaumond's avatar
Julien Chaumond committed
1056
1057
1058
1059
            revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
                identifier allowed by git.
1060
            mirror(:obj:`str`, `optional`):
Sylvain Gugger's avatar
Sylvain Gugger committed
1061
1062
1063
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
                Please refer to the mirror site for more information.
1064
1065
            _fast_init(:obj:`bool`, `optional`, defaults to `:obj:`True`):
                Whether or not to disable fast initialization.
1066
1067
1068
            torch_dtype (:obj:`str` or :obj:`torch.dtype`, `optional`):
                Override the default ``torch.dtype`` and load the model under this dtype. If ``"auto"`` is passed the
                dtype will be automatically derived from the model's weights.

                .. warning::

                    One should only disable `_fast_init` to ensure backwards compatibility with
                    ``transformers.__version__ < 4.6.0`` for seeded model initialization. This argument will be removed
                    at the next major version. See `pull request 11471
                    <https://github.com/huggingface/transformers/pull/11471>`__ for more information.

            kwargs (remaining dictionary of keyword arguments, `optional`):
                Can be used to update the configuration object (after it is loaded) and initialize the model (e.g.,
                :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
                automatically loaded:

                    - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
                      underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
                      initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
                      ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
                      with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
                      attribute will be passed to the underlying model's ``__init__`` function.

        .. note::

            Passing :obj:`use_auth_token=True` is required when you want to use a private model.

        .. note::

            Activate the special `"offline-mode"
            <https://huggingface.co/transformers/installation.html#offline-mode>`__ to use this method in a firewalled
            environment.

        Examples::

            >>> from transformers import BertConfig, BertModel
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = BertModel.from_pretrained('bert-base-uncased')
            >>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
            >>> model = BertModel.from_pretrained('./test/saved_model/')
            >>> # Update configuration during loading.
            >>> model = BertModel.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> assert model.config.output_attentions == True
            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
            >>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
            >>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
            >>> # Loading from a Flax checkpoint file instead of a PyTorch model (slower)
            >>> model = BertModel.from_pretrained('bert-base-uncased', from_flax=True)
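            >>> # Sketch: override the dtype at load time (assumes half precision suits your hardware).
            >>> import torch
            >>> model = BertModel.from_pretrained('bert-base-uncased', torch_dtype=torch.float16)
            >>> # Or let `torch_dtype="auto"` derive the dtype from the checkpoint weights.
            >>> model = BertModel.from_pretrained('bert-base-uncased', torch_dtype="auto")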

        """
        config = kwargs.pop("config", None)
        state_dict = kwargs.pop("state_dict", None)
        cache_dir = kwargs.pop("cache_dir", None)
        from_tf = kwargs.pop("from_tf", False)
        from_flax = kwargs.pop("from_flax", False)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        output_loading_info = kwargs.pop("output_loading_info", False)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        mirror = kwargs.pop("mirror", None)
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)
        _fast_init = kwargs.pop("_fast_init", True)
        torch_dtype = kwargs.pop("torch_dtype", None)

        from_pt = not (from_tf or from_flax)

        user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                *model_args,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                _from_auto=from_auto_class,
                _from_pipeline=from_pipeline,
                **kwargs,
            )
        else:
            model_kwargs = kwargs

        # Load model
        if pretrained_model_name_or_path is not None:
            pretrained_model_name_or_path = str(pretrained_model_name_or_path)
            if os.path.isdir(pretrained_model_name_or_path):
                if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
                    # Load from a TF 1.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
                elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
                    # Load from a TF 2.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
                elif from_flax and os.path.isfile(os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)):
                    # Load from a Flax checkpoint in priority if from_flax
                    archive_file = os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                    # Load from a PyTorch checkpoint
                    archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                else:
                    raise EnvironmentError(
                        f"Error: no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + '.index', FLAX_WEIGHTS_NAME]} found in "
                        f"directory {pretrained_model_name_or_path}, or `from_tf` and `from_flax` are set to False."
                    )
            elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
                archive_file = pretrained_model_name_or_path
            elif os.path.isfile(pretrained_model_name_or_path + ".index"):
                if not from_tf:
                    raise ValueError(
                        f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set "
                        "from_tf to True to load from this checkpoint."
                    )
                archive_file = pretrained_model_name_or_path + ".index"
            else:
                # set correct filename
                if from_tf:
                    filename = TF2_WEIGHTS_NAME
                elif from_flax:
                    filename = FLAX_WEIGHTS_NAME
                else:
                    filename = WEIGHTS_NAME

                archive_file = hf_bucket_url(
                    pretrained_model_name_or_path,
                    filename=filename,
                    revision=revision,
                    mirror=mirror,
                )

            try:
                # Load from URL or cache if already cached
                resolved_archive_file = cached_path(
                    archive_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                )
            except EnvironmentError as err:
                logger.error(err)
                msg = (
                    f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
                    f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
                    f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}, {FLAX_WEIGHTS_NAME}.\n\n"
                )
                raise EnvironmentError(msg)

            if resolved_archive_file == archive_file:
                logger.info(f"loading weights file {archive_file}")
            else:
                logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
        else:
            resolved_archive_file = None

        # load pt weights early so that we know which dtype to init the model under
        if from_pt:
            if state_dict is None:
                try:
                    state_dict = torch.load(resolved_archive_file, map_location="cpu")
                except Exception:
                    raise OSError(
                        f"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' "
                        f"at '{resolved_archive_file}'"
                        "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
                    )

            # set dtype to instantiate the model under:
            # 1. If torch_dtype is not None, we use that dtype
            # 2. If torch_dtype is "auto", we auto-detect dtype from the loaded state_dict, by checking its first
            #    weights entry - we assume all weights are of the same dtype
            # we also may have config.torch_dtype available, but we won't rely on it till v5
            dtype_orig = None
            if torch_dtype is not None:
                if isinstance(torch_dtype, str):
                    if torch_dtype == "auto":
                        torch_dtype = next(iter(state_dict.values())).dtype
                    else:
                        raise ValueError(
                            f"`torch_dtype` can be either a `torch.dtype` or `auto`, but received {torch_dtype}"
                        )
                dtype_orig = cls._set_default_torch_dtype(torch_dtype)

        config.name_or_path = pretrained_model_name_or_path

        # Instantiate model.
        if is_deepspeed_zero3_enabled():
            import deepspeed

            logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
            # this immediately partitions the model across all gpus, to avoid the overhead in time
            # and memory copying it on CPU or each GPU first
            with deepspeed.zero.Init(config=deepspeed_config()):
                with no_init_weights(_enable=_fast_init):
                    model = cls(config, *model_args, **model_kwargs)
        else:
            with no_init_weights(_enable=_fast_init):
                model = cls(config, *model_args, **model_kwargs)

        if from_pt:
            # restore default dtype
            if dtype_orig is not None:
                torch.set_default_dtype(dtype_orig)

        if from_tf:
            if resolved_archive_file.endswith(".index"):
                # Load from a TensorFlow 1.X checkpoint - provided by original authors
                model = cls.load_tf_weights(model, config, resolved_archive_file[:-6])  # Remove the '.index'
            else:
                # Load from our TensorFlow 2.0 checkpoints
                try:
                    from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model

                    model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
                except ImportError:
                    logger.error(
                        "Loading a TensorFlow model in PyTorch requires both PyTorch and TensorFlow to be installed. Please see "
                        "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
                    )
                    raise
        elif from_flax:
            try:
                from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model

                model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file)
            except ImportError:
                logger.error(
                    "Loading a Flax model in PyTorch, requires both PyTorch and Flax to be installed. Please see "
                    "https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions."
                )
                raise
        elif from_pt:
            model, missing_keys, unexpected_keys, error_msgs = cls._load_state_dict_into_model(
                model, state_dict, pretrained_model_name_or_path, _fast_init=_fast_init
            )

        # make sure token embedding weights are still tied if needed
        model.tie_weights()

        # Set model in evaluation mode to deactivate Dropout modules by default
        model.eval()

        if output_loading_info:
            loading_info = {
                "missing_keys": missing_keys,
                "unexpected_keys": unexpected_keys,
                "error_msgs": error_msgs,
            }
            return model, loading_info

        return model

    @classmethod
    def _load_state_dict_into_model(cls, model, state_dict, pretrained_model_name_or_path, _fast_init=True):

        # Convert old format to new format if needed from a PyTorch state_dict
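        # (older checkpoints, e.g. ones converted from TensorFlow, stored LayerNorm parameters under the
        # names "gamma" and "beta" instead of PyTorch's "weight" and "bias")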
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if "gamma" in key:
                new_key = key.replace("gamma", "weight")
            if "beta" in key:
                new_key = key.replace("beta", "bias")
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)

        # Retrieve missing & unexpected_keys
        expected_keys = list(model.state_dict().keys())
        loaded_keys = list(state_dict.keys())
        prefix = model.base_model_prefix

        has_prefix_module = any(s.startswith(prefix) for s in loaded_keys)
        expects_prefix_module = any(s.startswith(prefix) for s in expected_keys)

        # key re-naming operations are never done on the keys
        # that are loaded, but always on the keys of the newly initialized model
        remove_prefix = not has_prefix_module and expects_prefix_module
        add_prefix = has_prefix_module and not expects_prefix_module

        if remove_prefix:
            expected_keys = [".".join(s.split(".")[1:]) if s.startswith(prefix) else s for s in expected_keys]
        elif add_prefix:
            expected_keys = [".".join([prefix, s]) for s in expected_keys]

        missing_keys = list(set(expected_keys) - set(loaded_keys))
        unexpected_keys = list(set(loaded_keys) - set(expected_keys))

        # Some models may have keys that are not in the state by design, removing them before needlessly warning
        # the user.
        if cls._keys_to_ignore_on_load_missing is not None:
            for pat in cls._keys_to_ignore_on_load_missing:
                missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

        if cls._keys_to_ignore_on_load_unexpected is not None:
            for pat in cls._keys_to_ignore_on_load_unexpected:
                unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

        if _fast_init:
            # retrieve uninitialized modules and initialize
            uninitialized_modules = model.retrieve_modules_from_names(
                missing_keys, add_prefix=add_prefix, remove_prefix=remove_prefix
            )
            for module in uninitialized_modules:
                model._init_weights(module)

        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, "_metadata", None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        error_msgs = []

        # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
        # so we need to apply the function recursively.
        def load(module: nn.Module, prefix=""):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            args = (state_dict, prefix, local_metadata, True, [], [], error_msgs)
            if is_deepspeed_zero3_enabled():
                import deepspeed

                # because zero3 puts placeholders in model params, this context
                # manager gathers (unpartitions) the params of the current layer, then loads from
                # the state dict and then re-partitions them again
                with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
                    if torch.distributed.get_rank() == 0:
                        module._load_from_state_dict(*args)
            else:
                module._load_from_state_dict(*args)

            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + ".")

        # Make sure we are able to load base models as well as derived models (with heads)
        start_prefix = ""
        model_to_load = model
        if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
            start_prefix = cls.base_model_prefix + "."
        if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
            model_to_load = getattr(model, cls.base_model_prefix)

        load(model_to_load, prefix=start_prefix)

        if len(unexpected_keys) > 0:
            logger.warning(
                f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
                f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
                f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
                f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
                f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
                f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
            )
        else:
            logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
        if len(missing_keys) > 0:
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized: {missing_keys}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        else:
            logger.info(
                f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
                f"If your task is similar to the task the model of the checkpoint was trained on, "
                f"you can already use {model.__class__.__name__} for predictions without further training."
            )
        if len(error_msgs) > 0:
            error_msg = "\n\t".join(error_msgs)
            raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")

        return model, missing_keys, unexpected_keys, error_msgs

    def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False):
        module_keys = {".".join(key.split(".")[:-1]) for key in names}

        # torch.nn.ParameterList is a special case where two parameter keywords
        # are appended to the module name, *e.g.* bert.special_embeddings.0
        module_keys = module_keys.union({".".join(key.split(".")[:-2]) for key in names if key[-1].isdigit()})

        retrieved_modules = []
        # retrieve all modules that have at least one missing weight name
        for name, module in self.named_modules():
            if remove_prefix:
                name = ".".join(name.split(".")[1:]) if name.startswith(self.base_model_prefix) else name
            elif add_prefix:
                name = ".".join([self.base_model_prefix, name]) if len(name) > 0 else self.base_model_prefix

            if name in module_keys:
                retrieved_modules.append(module)

        return retrieved_modules


class Conv1D(nn.Module):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (:obj:`int`): The number of output features.
        nx (:obj:`int`): The number of input features.
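
    Example (a minimal sketch of the transposed-weight behavior)::

        >>> import torch
        >>> conv = Conv1D(nf=6, nx=4)  # maps 4 input features to 6 output features
        >>> x = torch.rand(2, 5, 4)    # (batch_size, seq_len, nx)
        >>> conv(x).shape
        torch.Size([2, 5, 6])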
    """

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = nn.Parameter(w)
        self.bias = nn.Parameter(torch.zeros(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(*size_out)
        return x


class PoolerStartLogits(nn.Module):
    """
    Compute SQuAD start logits from sequence hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, 1)

    def forward(
        self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
                token should be masked.

        Returns:
            :obj:`torch.FloatTensor`: The start logits for SQuAD.
        """
        x = self.dense(hidden_states).squeeze(-1)

        if p_mask is not None:
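            # p_mask == 1.0 removes a token: its logit is zeroed out and pushed to a large negative value
            # (-65500 under fp16, close to the half-precision minimum, to avoid overflow; -1e30 otherwise)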
            if get_parameter_dtype(self) == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask

        return x


class PoolerEndLogits(nn.Module):
    """
    Compute SQuAD end logits from sequence hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
            :obj:`layer_norm_eps` to use.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dense_1 = nn.Linear(config.hidden_size, 1)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_states: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        p_mask: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
                The hidden states of the first tokens for the labeled span.
            start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                The position of the first token for the labeled span.
            p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
                token should be masked.

        .. note::

            One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
            ``start_positions`` overrides ``start_states``.

        Returns:
            :obj:`torch.FloatTensor`: The end logits for SQuAD.
        """
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
        if start_positions is not None:
            slen, hsz = hidden_states.shape[-2:]
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions)  # shape (bsz, 1, hsz)
            start_states = start_states.expand(-1, slen, -1)  # shape (bsz, slen, hsz)

        x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
        x = self.activation(x)
        x = self.LayerNorm(x)
        x = self.dense_1(x).squeeze(-1)

        if p_mask is not None:
            if get_parameter_dtype(self) == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask

        return x


class PoolerAnswerClass(nn.Module):
    """
    Compute SQuAD 2.0 answer class from classification and start tokens hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
    """

    def __init__(self, config):
        super().__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_states: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        cls_index: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
                The hidden states of the first tokens for the labeled span.
            start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                The position of the first token for the labeled span.
            cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.

        .. note::

            One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
            ``start_positions`` overrides ``start_states``.

        Returns:
            :obj:`torch.FloatTensor`: The SQuAD 2.0 answer class.
        """
        # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample.
        hsz = hidden_states.shape[-1]
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
        if start_positions is not None:
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions).squeeze(-2)  # shape (bsz, hsz)

        if cls_index is not None:
            cls_index = cls_index[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, hsz)
        else:
            cls_token_state = hidden_states[:, -1, :]  # shape (bsz, hsz)

        x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
        x = self.activation(x)
        x = self.dense_1(x).squeeze(-1)

        return x


@dataclass
class SquadHeadOutput(ModelOutput):
    """
    Base class for outputs of question answering models using a :class:`~transformers.modeling_utils.SQuADHead`.

    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification
            losses.
        start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top ``config.start_n_top`` start token possibilities (beam-search).
        start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top ``config.start_n_top`` start token possibilities (beam-search).
        end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities
            (beam-search).
        end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the ``is_impossible`` label of the answers.

    """

    loss: Optional[torch.FloatTensor] = None
    start_top_log_probs: Optional[torch.FloatTensor] = None
    start_top_index: Optional[torch.LongTensor] = None
    end_top_log_probs: Optional[torch.FloatTensor] = None
    end_top_index: Optional[torch.LongTensor] = None
    cls_logits: Optional[torch.FloatTensor] = None


class SQuADHead(nn.Module):
    r"""
    A SQuAD head inspired by XLNet.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
            :obj:`layer_norm_eps` to use.
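
    Example (a minimal sketch, assuming an XLNet-style config that provides ``start_n_top``, ``end_n_top``,
    ``hidden_size`` and ``layer_norm_eps``, with the default ``start_n_top`` of 5)::

        >>> import torch
        >>> from transformers import XLNetConfig
        >>> config = XLNetConfig()
        >>> squad_head = SQuADHead(config)
        >>> hidden_states = torch.rand(2, 16, config.hidden_size)
        >>> outputs = squad_head(hidden_states, return_dict=True)  # inference: beam-search outputs
        >>> outputs.start_top_log_probs.shape  # (batch_size, config.start_n_top)
        torch.Size([2, 5])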
    """

    def __init__(self, config):
        super().__init__()
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top

        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)

    @replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig)
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        cls_index: Optional[torch.LongTensor] = None,
        is_impossible: Optional[torch.LongTensor] = None,
        p_mask: Optional[torch.FloatTensor] = None,
        return_dict: bool = False,
    ) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                Final hidden states of the model on the sequence tokens.
            start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Positions of the first token for the labeled span.
            end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Positions of the last token for the labeled span.
            cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.
            is_impossible (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Whether the question has a possible answer in the paragraph or not.
            p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
                token should be masked.
            return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.

        Returns:
        """
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)

            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)

                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5

            return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,)

        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            start_log_probs = nn.functional.softmax(start_logits, dim=-1)  # shape (bsz, slen)

            start_top_log_probs, start_top_index = torch.topk(
                start_log_probs, self.start_n_top, dim=-1
            )  # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz)  # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp)  # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1)  # shape (bsz, slen, start_n_top, hsz)

            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
                start_states
            )  # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = nn.functional.softmax(end_logits, dim=1)  # shape (bsz, slen, start_n_top)

            end_top_log_probs, end_top_index = torch.topk(
                end_log_probs, self.end_n_top, dim=1
            )  # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)

            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)

            if not return_dict:
                return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
            else:
                return SquadHeadOutput(
                    start_top_log_probs=start_top_log_probs,
                    start_top_index=start_top_index,
                    end_top_log_probs=end_top_log_probs,
                    end_top_index=end_top_index,
                    cls_logits=cls_logits,
                )


class SequenceSummary(nn.Module):
    r"""
    Compute a single vector summary of a sequence hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
            config class of your model for the default values it uses):

            - **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:

                - :obj:`"last"` -- Take the last token hidden state (like XLNet)
                - :obj:`"first"` -- Take the first token hidden state (like Bert)
                - :obj:`"mean"` -- Take the mean of all tokens hidden states
                - :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
                - :obj:`"attn"` -- Not implemented now, use multi-head attention

            - **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
            - **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
              :obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
            - **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
              output, another string or :obj:`None` will add no activation.
            - **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
              activation.
            - **summary_last_dropout** (:obj:`float`) -- Optional dropout probability after the projection and
              activation.
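
    Example (a minimal sketch, assuming a GPT-2 style config whose ``summary_*`` attributes project the
    last-token summary to ``config.num_labels`` classes)::

        >>> import torch
        >>> from transformers import GPT2Config
        >>> config = GPT2Config()
        >>> summary = SequenceSummary(config)
        >>> hidden_states = torch.rand(2, 10, config.hidden_size)  # (batch_size, seq_len, hidden_size)
        >>> summary(hidden_states).shape  # last-token summary, projected to config.num_labels
        torch.Size([2, 2])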
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()

        self.summary_type = getattr(config, "summary_type", "last")
        if self.summary_type == "attn":
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

        self.summary = Identity()
        if hasattr(config, "summary_use_proj") and config.summary_use_proj:
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = nn.Linear(config.hidden_size, num_classes)

        activation_string = getattr(config, "summary_activation", None)
        self.activation: Callable = get_activation(activation_string) if activation_string else Identity()

        self.first_dropout = Identity()
        if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
            self.first_dropout = nn.Dropout(config.summary_first_dropout)

        self.last_dropout = Identity()
        if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
            self.last_dropout = nn.Dropout(config.summary_last_dropout)

    def forward(
        self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
    ) -> torch.FloatTensor:
        """
        Compute a single vector summary of a sequence hidden states.

        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`[batch_size, seq_len, hidden_size]`):
                The hidden states of the last layer.
            cls_index (:obj:`torch.LongTensor` of shape :obj:`[batch_size]` or :obj:`[batch_size, ...]` where ... are optional leading dimensions of :obj:`hidden_states`, `optional`):
                Used if :obj:`summary_type == "cls_index"`; the position of the classification token for each example.
                If :obj:`None`, the last token of the sequence is used as classification token.

        Returns:
            :obj:`torch.FloatTensor`: The summary of the sequence hidden states.
        """
        if self.summary_type == "last":
            output = hidden_states[:, -1]
        elif self.summary_type == "first":
            output = hidden_states[:, 0]
        elif self.summary_type == "mean":
            output = hidden_states.mean(dim=1)
        elif self.summary_type == "cls_index":
            if cls_index is None:
                cls_index = torch.full_like(
                    hidden_states[..., :1, :],
                    hidden_states.shape[-2] - 1,
                    dtype=torch.long,
                )
            else:
                cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
                cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
            # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
            output = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, XX, hidden_size)
        elif self.summary_type == "attn":
            raise NotImplementedError

        output = self.first_dropout(output)
        output = self.summary(output)
        output = self.activation(output)
        output = self.last_dropout(output)

        return output


def unwrap_model(model: nn.Module) -> nn.Module:
    """
    Recursively unwraps a model from potential containers (as used in distributed training).

    Args:
        model (:obj:`torch.nn.Module`): The model to unwrap.
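
    Example (a minimal sketch, using :obj:`torch.nn.DataParallel` as the wrapping container)::

        >>> import torch
        >>> wrapped = torch.nn.DataParallel(torch.nn.Linear(2, 2))
        >>> unwrap_model(wrapped) is wrapped.module
        True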
    """
    # since there could be multiple levels of wrapping, unwrap recursively
    if hasattr(model, "module"):
        return unwrap_model(model.module)
    else:
        return model


def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear:
    """
    Prune a linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (:obj:`torch.nn.Linear`): The layer to prune.
        index (:obj:`torch.LongTensor`): The indices to keep in the layer.
        dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.

    Returns:
        :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
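
    Example (illustrative; keeps output units 0 and 2 of a small layer)::

        >>> import torch
        >>> from torch import nn
        >>> layer = nn.Linear(4, 3)
        >>> index = torch.tensor([0, 2])
        >>> prune_linear_layer(layer, index, dim=0).weight.shape
        torch.Size([2, 4])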
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if layer.bias is not None:
        if dim == 1:
            b = layer.bias.clone().detach()
        else:
            b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    if layer.bias is not None:
        new_layer.bias.requires_grad = False
        new_layer.bias.copy_(b.contiguous())
        new_layer.bias.requires_grad = True
    return new_layer
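

# Illustrative sketch (assumed sizes, not part of the library API): keeping
# three of the six output units of a linear layer. With dim=0, rows of the
# weight matrix and the matching bias entries are selected.
def _example_prune_linear_layer():
    layer = nn.Linear(4, 6)
    index = torch.tensor([0, 2, 3], dtype=torch.long)  # output units to keep
    pruned = prune_linear_layer(layer, index, dim=0)
    assert pruned.weight.shape == (3, 4)
    assert torch.equal(pruned.bias, layer.bias[index])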


def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
    """
    Prune a Conv1D layer to keep only entries in index. A Conv1D works like a Linear layer (see e.g. BERT) but the
    weights are transposed.

    Used to remove heads.

    Args:
        layer (:class:`~transformers.modeling_utils.Conv1D`): The layer to prune.
        index (:obj:`torch.LongTensor`): The indices to keep in the layer.
        dim (:obj:`int`, `optional`, defaults to 1): The dimension on which to keep the indices.

    Returns:
        :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with :obj:`requires_grad=True`.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if dim == 0:
        b = layer.bias.clone().detach()
    else:
        b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    new_layer.bias.requires_grad = False
    new_layer.bias.copy_(b.contiguous())
    new_layer.bias.requires_grad = True
    return new_layer
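

# Illustrative sketch (assumed sizes, illustrative only): a Conv1D stores its
# weight transposed relative to nn.Linear, i.e. with shape (nx, nf), so pruning
# output features means selecting columns, hence the default dim=1.
def _example_prune_conv1d_layer():
    layer = Conv1D(6, 4)  # nf=6 output features, nx=4 input features; weight shape (4, 6)
    index = torch.tensor([0, 2, 3], dtype=torch.long)  # output features to keep
    pruned = prune_conv1d_layer(layer, index, dim=1)
    assert pruned.weight.shape == (4, 3)
    assert pruned.bias.shape == (3,)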


def prune_layer(
    layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
) -> Union[nn.Linear, Conv1D]:
    """
    Prune a Conv1D or linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (:obj:`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
        index (:obj:`torch.LongTensor`): The indices to keep in the layer.
        dim (:obj:`int`, `optional`): The dimension on which to keep the indices.

    Returns:
        :obj:`torch.nn.Linear` or :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with
        :obj:`requires_grad=True`.
    """
    if isinstance(layer, nn.Linear):
        return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
    elif isinstance(layer, Conv1D):
        return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
    else:
        raise ValueError(f"Can't prune layer of class {layer.__class__}")
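

# Minimal dispatch sketch (illustrative only): the same call prunes either
# layer type, with the head dimension picked automatically when `dim` is None
# (dim=0 for nn.Linear, dim=1 for Conv1D).
def _example_prune_layer():
    index = torch.tensor([1, 2], dtype=torch.long)
    pruned_linear = prune_layer(nn.Linear(4, 6), index)  # dispatched with dim=0
    pruned_conv1d = prune_layer(Conv1D(6, 4), index)  # dispatched with dim=1
    assert pruned_linear.weight.shape == (2, 4)
    assert pruned_conv1d.weight.shape == (4, 2)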


def apply_chunking_to_forward(
    forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
    """
    This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the
    dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory.

    If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as
    directly applying :obj:`forward_fn` to :obj:`input_tensors`.

    Args:
        forward_fn (:obj:`Callable[..., torch.Tensor]`):
            The forward function of the model.
        chunk_size (:obj:`int`):
            The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`.
        chunk_dim (:obj:`int`):
            The dimension over which the :obj:`input_tensors` should be chunked.
        input_tensors (:obj:`Tuple[torch.Tensor]`):
            The input tensors of :obj:`forward_fn` which will be chunked.

    Returns:
        :obj:`torch.Tensor`: A tensor with the same shape as the one :obj:`forward_fn` would have given if applied
        directly to :obj:`input_tensors`.


    Examples::

        # rename the usual forward() fn to forward_chunk()
        def forward_chunk(self, hidden_states):
            hidden_states = self.decoder(hidden_states)
            return hidden_states

        # implement a chunked forward function
        def forward(self, hidden_states):
            return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
    """

    assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors"
    tensor_shape = input_tensors[0].shape[chunk_dim]
    assert all(
        input_tensor.shape[chunk_dim] == tensor_shape for input_tensor in input_tensors
    ), "All input tenors have to be of the same shape"

    # inspect.signature exists since Python 3.5 and is a Python method -> no problem with backward compatibility
    num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
    if num_args_in_forward_chunk_fn != len(input_tensors):
        raise ValueError(
            f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input "
            "tensors are given"
        )

    if chunk_size > 0:
        if input_tensors[0].shape[chunk_dim] % chunk_size != 0:
            raise ValueError(
                f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk "
                f"size {chunk_size}"
            )

        num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size

        # chunk input tensor into tuples
        input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
        # apply forward fn to every tuple
        output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
        # concatenate output at same dimension
        return torch.cat(output_chunks, dim=chunk_dim)

    return forward_fn(*input_tensors)
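

# Usage sketch (illustrative shapes, not part of the library API): chunking a
# feed-forward layer over the sequence dimension gives the same result as a
# direct call, because the layer is applied independently at each position.
# Note the bound `forward` method is passed so that its inspected signature
# exposes exactly one parameter, matching the single input tensor.
def _example_apply_chunking_to_forward():
    ff = nn.Linear(8, 8)
    hidden_states = torch.randn(2, 16, 8)  # (batch, seq_len, hidden)
    chunked = apply_chunking_to_forward(ff.forward, 4, 1, hidden_states)  # 16 / 4 = 4 chunks
    assert torch.allclose(chunked, ff(hidden_states), atol=1e-6)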