# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import os
import re
import warnings
from contextlib import contextmanager
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union

import torch
from torch import Tensor, device, dtype, nn
from torch.nn import CrossEntropyLoss

from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled
from .file_utils import (
    DUMMY_INPUTS,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    WEIGHTS_NAME,
    ModelOutput,
    PushToHubMixin,
    cached_path,
    hf_bucket_url,
    is_offline_mode,
    is_remote_url,
    replace_return_docstrings,
)
from .generation_utils import GenerationMixin
from .utils import logging


logger = logging.get_logger(__name__)

_init_weights = True


@contextmanager
def no_init_weights(_enable=True):
    """
    Context manager to globally disable weight initialization to speed up loading large models.

    TODO(Patrick): Delete safety argument `_enable=True` at next major version.
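
    Example (a usage sketch; ``MyModel`` stands in for any model class whose
    weight initialization respects the global ``_init_weights`` flag)::

        >>> with no_init_weights():
        ...     model = MyModel(config)  # modules are built, weights left uninitialized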
    """
    global _init_weights
    if _enable:
        _init_weights = False
    try:
        yield
    finally:
        _init_weights = True


try:
    from torch.nn import Identity
except ImportError:
    # Older PyTorch compatibility
    class Identity(nn.Module):
        r"""A placeholder identity operator that is argument-insensitive."""

        def __init__(self, *args, **kwargs):
            super().__init__()

        def forward(self, input):
            return input


def find_pruneable_heads_and_indices(
    heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
) -> Tuple[Set[int], torch.LongTensor]:
    """
    Finds the heads and their indices taking :obj:`already_pruned_heads` into account.

    Args:
        heads (:obj:`List[int]`): List of the indices of heads to prune.
        n_heads (:obj:`int`): The number of heads in the model.
        head_size (:obj:`int`): The size of each head.
        already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.

    Returns:
        :obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
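
    Example (a small sketch with hypothetical sizes: 12 heads of size 64, where head 1
    was pruned earlier, so heads 0 and 2 are remapped before masking)::

        >>> heads, index = find_pruneable_heads_and_indices([0, 2], 12, 64, {1})
        >>> sorted(heads), index.shape
        ([0, 2], torch.Size([640]))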
    """
    mask = torch.ones(n_heads, head_size)
    heads = set(heads) - already_pruned_heads  # Convert to set and remove already pruned heads
    for head in heads:
        # Compute how many pruned heads are before the head and move the index accordingly
        head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
        mask[head] = 0
    mask = mask.view(-1).contiguous().eq(1)
    index: torch.LongTensor = torch.arange(len(mask))[mask].long()
    return heads, index


def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
    try:
        return next(parameter.parameters()).device
    except StopIteration:
        # For nn.DataParallel compatibility in PyTorch 1.5

        def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
            tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
            return tuples

        gen = parameter._named_members(get_members_fn=find_tensor_attributes)
        first_tuple = next(gen)
        return first_tuple[1].device


def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
    try:
        return next(parameter.parameters()).dtype
    except StopIteration:
        # For nn.DataParallel compatibility in PyTorch 1.5

        def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
            tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
            return tuples

        gen = parameter._named_members(get_members_fn=find_tensor_attributes)
        first_tuple = next(gen)
        return first_tuple[1].dtype
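
# A usage sketch for the two helpers above (any nn.Module works; the values shown
# are for a freshly constructed module on CPU):
#
#     linear = nn.Linear(2, 2)
#     get_parameter_device(linear)  # device(type='cpu')
#     get_parameter_dtype(linear)   # torch.float32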


class ModuleUtilsMixin:
    """
    A few utilities for :obj:`torch.nn.Modules`, to be used as a mixin.
    """

    @staticmethod
    def _hook_rss_memory_pre_forward(module, *args, **kwargs):
        try:
            import psutil
        except ImportError:
            raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")

        process = psutil.Process(os.getpid())
        mem = process.memory_info()
        module.mem_rss_pre_forward = mem.rss
        return None

    @staticmethod
    def _hook_rss_memory_post_forward(module, *args, **kwargs):
        try:
            import psutil
        except ImportError:
            raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")

        process = psutil.Process(os.getpid())
        mem = process.memory_info()
        module.mem_rss_post_forward = mem.rss
        mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
        module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
        return None

    def add_memory_hooks(self):
        """
        Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.

        Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to
        zero with :obj:`model.reset_memory_hooks_state()`.
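
        Example (sketch; requires :obj:`psutil`, and ``inputs`` stands for any valid batch of model inputs)::

            >>> model.add_memory_hooks()
            >>> outputs = model(**inputs)
            >>> model.mem_rss_diff  # increase in resident memory (in bytes) during the forward pass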
        """
        for module in self.modules():
            module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
            module.register_forward_hook(self._hook_rss_memory_post_forward)
        self.reset_memory_hooks_state()

    def reset_memory_hooks_state(self):
        """
        Reset the :obj:`mem_rss_diff` attribute of each module (see
        :func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`).
        """
        for module in self.modules():
            module.mem_rss_diff = 0
            module.mem_rss_post_forward = 0
            module.mem_rss_pre_forward = 0

    @property
    def device(self) -> device:
        """
        :obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same
        device).
        """
        return get_parameter_device(self)

    @property
    def dtype(self) -> dtype:
        """
        :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
        """
        return get_parameter_dtype(self)

    def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
        """
        Invert an attention mask (e.g., switches 0. and 1.).

        Args:
            encoder_attention_mask (:obj:`torch.Tensor`): An attention mask.

        Returns:
            :obj:`torch.Tensor`: The inverted attention mask.
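
        Example (a sketch for a padded batch of two sequences)::

            >>> mask = torch.tensor([[1, 1, 1], [1, 1, 0]])
            >>> model.invert_attention_mask(mask).shape
            torch.Size([2, 1, 1, 3])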
        """
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
        # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
        # /transformer/transformer_layers.py#L270
        # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
        # encoder_extended_attention_mask.transpose(-1, -2))
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility

        if self.dtype == torch.float16:
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
        elif self.dtype == torch.float32:
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
        else:
            raise ValueError(
                f"{self.dtype} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`"
            )

        return encoder_extended_attention_mask

    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device (:obj:`torch.device`):
                The device of the input to the model.

        Returns:
            :obj:`torch.Tensor`: The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
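
        Example (sketch for an encoder, where a 2D padding mask becomes a broadcastable additive mask)::

            >>> attention_mask = torch.tensor([[1, 1, 0]])
            >>> model.get_extended_attention_mask(attention_mask, (1, 3), attention_mask.device).shape
            torch.Size([1, 1, 1, 3])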
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if self.config.is_decoder:
                batch_size, seq_length = input_shape
                seq_ids = torch.arange(seq_length, device=device)
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)

                if causal_mask.shape[1] < attention_mask.shape[1]:
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones(
                                (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
                            ),
                            causal_mask,
                        ],
                        axis=-1,
                    )

                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

    def get_head_mask(
        self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
    ) -> Tensor:
        """
        Prepare the head mask if needed.

        Args:
            head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`):
                The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
            num_hidden_layers (:obj:`int`):
                The number of hidden layers in the model.
            is_attention_chunked (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the attention scores are computed by chunks.

        Returns:
            :obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or
            list with :obj:`[None]` for each layer.
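
        Example (sketch; keeping all 12 heads in each of 12 layers)::

            >>> head_mask = torch.ones(12)
            >>> model.get_head_mask(head_mask, num_hidden_layers=12).shape
            torch.Size([12, 1, 12, 1, 1])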
        """
        if head_mask is not None:
            head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
            if is_attention_chunked is True:
                head_mask = head_mask.unsqueeze(-1)
        else:
            head_mask = [None] * num_hidden_layers

        return head_mask

    def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
        """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
        if head_mask.dim() == 1:
            head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
        elif head_mask.dim() == 2:
            head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
        assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
        head_mask = head_mask.to(dtype=self.dtype)  # switch to float if need + fp16 compatibility
        return head_mask

    def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
        """
        Get number of (optionally, trainable or non-embeddings) parameters in the module.

        Args:
            only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return only the number of trainable parameters

            exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return only the number of non-embeddings parameters

        Returns:
            :obj:`int`: The number of parameters.
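
        Example (sketch)::

            >>> model.num_parameters(only_trainable=True)  # counts only parameters with ``requires_grad=True``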
        """

        if exclude_embeddings:
            # Parameters are never nn.Embedding instances themselves, so embedding weights
            # are identified by the names of the nn.Embedding modules that own them.
            embedding_param_names = [
                f"{name}.weight" for name, module in self.named_modules() if isinstance(module, nn.Embedding)
            ]
            parameters = [p for name, p in self.named_parameters() if name not in embedding_param_names]
        else:
            parameters = list(self.parameters())

        return sum(p.numel() for p in parameters if p.requires_grad or not only_trainable)

    def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int:
        """
        Helper function to estimate the total number of tokens from the model inputs.

        Args:
            input_dict (:obj:`dict`): The model inputs.

        Returns:
            :obj:`int`: The total number of tokens.
        """
        token_inputs = [tensor for key, tensor in input_dict.items() if "input" in key]
        if token_inputs:
            return sum([token_input.numel() for token_input in token_inputs])
        else:
            warnings.warn(
                "Could not estimate the number of tokens of the input, floating-point operations will not be computed"
            )
            return 0

    def floating_point_ops(
        self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True
    ) -> int:
        """
        Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a
        batch with this transformer model. Default approximation neglects the quadratic dependency on the number of
        tokens (valid if :obj:`12 * d_model << sequence_length`) as laid out in `this paper
        <https://arxiv.org/pdf/2001.08361.pdf>`__ section 2.1. Should be overridden for transformers with parameter
        re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.

        Args:
            input_dict (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The model inputs, used by :obj:`estimate_tokens` to compute the total token count.

            exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to count embedding and softmax operations.

        Returns:
            :obj:`int`: The number of floating-point operations.
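
        Example (sketch; the factor 6 is 2 FLOPs per parameter and token for the forward pass plus 4 for backward)::

            >>> input_dict = {"input_ids": torch.ones((2, 128), dtype=torch.long)}
            >>> flops = model.floating_point_ops(input_dict)  # 6 * 256 * num_parameters(exclude_embeddings=True)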
        """

        return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)

class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin):
    r"""
    Base class for all models.

    :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods
    for loading, downloading and saving models as well as a few methods common to all models to:

        * resize the input embeddings,
        * prune heads in the self-attention heads.

    Class attributes (overridden by derived classes):

        - **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
          :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
        - **load_tf_weights** (:obj:`Callable`) -- A python `method` for loading a TensorFlow checkpoint in a PyTorch
          model, taking as arguments:

            - **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the
              TensorFlow checkpoint.
            - **config** (:class:`~transformers.PreTrainedConfig`) -- An instance of the configuration associated to
              the model.
            - **path** (:obj:`str`) -- A path to the TensorFlow checkpoint.

        - **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
          derived classes of the same architecture adding modules on top of the base model.
        - **is_parallelizable** (:obj:`bool`) -- A flag indicating whether this model supports model parallelization.
    """
    config_class = None
    base_model_prefix = ""
    # a list of re pattern of tensor names to ignore from the model when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_missing = None
    # a list of re pattern of tensor names to ignore from the weights when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_unexpected = None
    # a list of tensor names to ignore when saving the model (useful for keys that aren't
    # trained, but which are deterministic)
    _keys_to_ignore_on_save = None

    is_parallelizable = False

    @property
    def dummy_inputs(self) -> Dict[str, torch.Tensor]:
        """
        :obj:`Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
        """
        return {"input_ids": torch.tensor(DUMMY_INPUTS)}

    def __init__(self, config: PretrainedConfig, *inputs, **kwargs):
        super().__init__()
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
                "`PretrainedConfig`. To create a model from a pretrained model use "
                f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # Save config and origin of the pretrained weights if given in model
        self.config = config
        self.name_or_path = config.name_or_path

    @property
    def base_model(self) -> nn.Module:
        """
        :obj:`torch.nn.Module`: The main body of the model.
        """
        return getattr(self, self.base_model_prefix, self)

    def get_input_embeddings(self) -> nn.Module:
        """
        Returns the model's input embeddings.

        Returns:
            :obj:`nn.Module`: A torch module mapping vocabulary to hidden states.
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            return base_model.get_input_embeddings()
        else:
            raise NotImplementedError

    def set_input_embeddings(self, value: nn.Module):
        """
        Set model's input embeddings.

        Args:
            value (:obj:`nn.Module`): A module mapping vocabulary to hidden states.
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            base_model.set_input_embeddings(value)
        else:
            raise NotImplementedError

    def get_output_embeddings(self) -> nn.Module:
        """
        Returns the model's output embeddings.

        Returns:
            :obj:`nn.Module`: A torch module mapping hidden states to vocabulary.
        """
        return None  # Overwrite for models with output embeddings

    def _init_weights(self, module):
        """
        Initialize the weights. This method should be overridden by derived class.
        """
        raise NotImplementedError(f"Make sure `_init_weights` is implemented for {self.__class__}")

    def tie_weights(self):
        """
        Tie the weights between the input embeddings and the output embeddings.

        If the :obj:`torchscript` flag is set in the configuration, TorchScript can't handle parameter sharing, so we
        clone the weights instead.
        """
        output_embeddings = self.get_output_embeddings()
        if output_embeddings is not None and self.config.tie_word_embeddings:
            self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())

        if self.config.is_encoder_decoder and self.config.tie_encoder_decoder:
            if hasattr(self, self.base_model_prefix):
                self = getattr(self, self.base_model_prefix)
            self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)

    @staticmethod
    def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str):
        uninitialized_encoder_weights: List[str] = []
        if decoder.__class__ != encoder.__class__:
            logger.info(
                f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized."
            )

        def tie_encoder_to_decoder_recursively(
            decoder_pointer: nn.Module,
            encoder_pointer: nn.Module,
            module_name: str,
            uninitialized_encoder_weights: List[str],
            depth=0,
        ):
            assert isinstance(decoder_pointer, nn.Module) and isinstance(
                encoder_pointer, nn.Module
            ), f"{decoder_pointer} and {encoder_pointer} have to be of type nn.Module"
            if hasattr(decoder_pointer, "weight"):
                assert hasattr(encoder_pointer, "weight")
                encoder_pointer.weight = decoder_pointer.weight
                if hasattr(decoder_pointer, "bias"):
                    assert hasattr(encoder_pointer, "bias")
                    encoder_pointer.bias = decoder_pointer.bias
                return

            encoder_modules = encoder_pointer._modules
            decoder_modules = decoder_pointer._modules
            if len(decoder_modules) > 0:
                assert (
                    len(encoder_modules) > 0
                ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"

                all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
                encoder_layer_pos = 0
                for name, module in decoder_modules.items():
                    if name.isdigit():
                        encoder_name = str(int(name) + encoder_layer_pos)
                        decoder_name = name
                        if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
                            encoder_modules
                        ) != len(decoder_modules):
                            # this can happen if the name corresponds to the position in a module list of layers
                            # in this case the decoder has added a cross-attention that the encoder does not have
                            # thus skip this step and subtract one layer pos from encoder
                            encoder_layer_pos -= 1
                            continue
                    elif name not in encoder_modules:
                        continue
                    elif depth > 500:
                        raise ValueError(
                            "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
                        )
                    else:
                        decoder_name = encoder_name = name
                    tie_encoder_to_decoder_recursively(
                        decoder_modules[decoder_name],
                        encoder_modules[encoder_name],
                        module_name + "/" + name,
                        uninitialized_encoder_weights,
                        depth=depth + 1,
                    )
                    all_encoder_weights.remove(module_name + "/" + encoder_name)

                uninitialized_encoder_weights += list(all_encoder_weights)

        # tie weights recursively
        tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights)
        if len(uninitialized_encoder_weights) > 0:
            logger.warning(
                f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
            )

    def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
        """Tie or clone module weights depending on whether we are using TorchScript or not"""
        if self.config.torchscript:
            output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
        else:
            output_embeddings.weight = input_embeddings.weight

        if getattr(output_embeddings, "bias", None) is not None:
            output_embeddings.bias.data = nn.functional.pad(
                output_embeddings.bias.data,
                (
                    0,
                    output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],
                ),
                "constant",
                0,
            )
        if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
            output_embeddings.out_features = input_embeddings.num_embeddings

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
        """
        Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.

        Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.

        Arguments:
            new_num_tokens (:obj:`int`, `optional`):
                The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
                vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
                just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model without doing
                anything.

        Return:
            :obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
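
        Example (sketch; ``tokenizer`` is a hypothetical tokenizer whose vocabulary was extended with new tokens)::

            >>> model.resize_token_embeddings(len(tokenizer))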
        """
        model_embeds = self._resize_token_embeddings(new_num_tokens)
        if new_num_tokens is None:
            return model_embeds

        # Update base model and current model config
        self.config.vocab_size = new_num_tokens
        self.vocab_size = new_num_tokens

        # Tie weights again if needed
        self.tie_weights()

        return model_embeds

    def _resize_token_embeddings(self, new_num_tokens):
        old_embeddings = self.get_input_embeddings()
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.set_input_embeddings(new_embeddings)

        # if word embeddings are not tied, make sure that lm head is resized as well
        if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings:
            old_lm_head = self.get_output_embeddings()
            new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens)
            self.set_output_embeddings(new_lm_head)

        return self.get_input_embeddings()

    def _get_resized_embeddings(
        self, old_embeddings: nn.Embedding, new_num_tokens: Optional[int] = None
    ) -> nn.Embedding:
        """
        Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
        initialized vectors at the end. Reducing the size will remove vectors from the end.

        Args:
            old_embeddings (:obj:`torch.nn.Embedding`):
                Old embeddings to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the embedding matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`torch.nn.Embedding` module of the model without doing anything.

        Return:
            :obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
            :obj:`new_num_tokens` is :obj:`None`
        """
        if new_num_tokens is None:
            return old_embeddings

        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None):
                old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
        else:
            old_num_tokens, old_embedding_dim = old_embeddings.weight.size()

        if old_num_tokens == new_num_tokens:
            return old_embeddings

        if not isinstance(old_embeddings, nn.Embedding):
            raise TypeError(
                f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. "
                f"You should either use a different resize function or make sure that `old_embeddings` is an instance of {nn.Embedding}."
            )

        # Build new embeddings
        new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim).to(
            self.device, dtype=old_embeddings.weight.dtype
        )

        # initialize all new embeddings (in particular added tokens)
        self._init_weights(new_embeddings)

        # Copy token embeddings from the previous weights

        # numbers of tokens to copy
        n = min(old_num_tokens, new_num_tokens)
        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=0):
                if torch.distributed.get_rank() == 0:
                    new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
        else:
            new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]

        return new_embeddings

    def _get_resized_lm_head(
        self, old_lm_head: nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False
    ) -> nn.Linear:
        """
        Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized
        vectors at the end. Reducing the size will remove vectors from the end.

        Args:
            old_lm_head (:obj:`torch.nn.Linear`):
                Old lm head linear layer to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`torch.nn.Linear` module of the model without doing anything.
            transposed (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether ``old_lm_head`` is transposed or not. If :obj:`True`, ``old_lm_head.size()`` is
                ``lm_head_dim, vocab_size``, else ``vocab_size, lm_head_dim``.

        Return:
            :obj:`torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if
            :obj:`new_num_tokens` is :obj:`None`
        """
        if new_num_tokens is None:
            return old_lm_head

        old_num_tokens, old_lm_head_dim = (
            old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
        )

        if old_num_tokens == new_num_tokens:
            return old_lm_head

        if not isinstance(old_lm_head, nn.Linear):
            raise TypeError(
                f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. "
                f"You should either use a different resize function or make sure that `old_lm_head` is an instance of {nn.Linear}."
            )

        # Build new lm head
        new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim)
        has_new_lm_head_bias = old_lm_head.bias is not None
        new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias).to(self.device)

        # initialize new lm head (in particular added tokens)
        self._init_weights(new_lm_head)

        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)

        # Copy old lm head weights to new lm head
        if not transposed:
            new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :]
        else:
            new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy]

        # Copy bias weights to new lm head
        if has_new_lm_head_bias:
            new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy]

        return new_lm_head

    def init_weights(self):
        """
        If needed, prunes and maybe initializes weights.
        """
        # Prune heads if needed
        if self.config.pruned_heads:
            self.prune_heads(self.config.pruned_heads)

        if _init_weights:
            # Initialize weights
            self.apply(self._init_weights)

            # Tie weights should be skipped when not initializing all weights
            # since from_pretrained(...) calls tie weights anyways
            self.tie_weights()

    def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
        """
        Prunes heads of the base model.

        Arguments:
            heads_to_prune (:obj:`Dict[int, List[int]]`):
                Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
                heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
                0 and 2 on layer 1 and heads 2 and 3 on layer 2.
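
        Example (sketch; prunes heads 0 and 2 of layer 1 and heads 2 and 3 of layer 2)::

            >>> model.prune_heads({1: [0, 2], 2: [2, 3]})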
        """
        # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
        for layer, heads in heads_to_prune.items():
            union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
            self.config.pruned_heads[layer] = list(union_heads)  # Unfortunately we have to store it as list for JSON

        self.base_model._prune_heads(heads_to_prune)

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        save_config: bool = True,
        state_dict: Optional[dict] = None,
        save_function: Callable = torch.save,
        push_to_hub: bool = False,
        **kwargs,
    ):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        :func:`~transformers.PreTrainedModel.from_pretrained` class method.

        Arguments:
            save_directory (:obj:`str` or :obj:`os.PathLike`):
                Directory to which to save. Will be created if it doesn't exist.
            save_config (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to save the config of the model. Useful when in distributed training like TPUs and need
                to call this function on all processes. In this case, set :obj:`save_config=True` only on the main
                process to avoid race conditions.
            state_dict (nested dictionary of :obj:`torch.Tensor`):
                The state dictionary of the model to save. Will default to :obj:`self.state_dict()`, but can be used to
                only save parts of the model or if special precautions need to be taken when recovering the state
                dictionary of a model (like when using model parallelism).
            save_function (:obj:`Callable`):
                The function to use to save the state dictionary. Useful on distributed training like TPUs when one
                need to replace :obj:`torch.save` by another method.
            push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to push your model to the Hugging Face model hub after saving it.

                .. warning::

                    Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with
                    :obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are
                    pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory
                    instead.

            kwargs:
                Additional key word arguments passed along to the
                :meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.
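
        Example (sketch)::

            >>> model.save_pretrained("./my_model_directory/")
            >>> # the same class can later reload it with
            >>> model = model.__class__.from_pretrained("./my_model_directory/")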
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo = self._create_or_get_repo(save_directory, **kwargs)

        os.makedirs(save_directory, exist_ok=True)

        # Only save the model itself if we are using distributed training
        model_to_save = unwrap_model(self)

        # Attach architecture to the config
        model_to_save.config.architectures = [model_to_save.__class__.__name__]

        # Save the config
        if save_config:
            model_to_save.config.save_pretrained(save_directory)

        # Save the model
        if state_dict is None:
            state_dict = model_to_save.state_dict()

        # Handle the case where some state_dict keys shouldn't be saved
        if self._keys_to_ignore_on_save is not None:
            state_dict = {k: v for k, v in state_dict.items() if k not in self._keys_to_ignore_on_save}

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
        save_function(state_dict, output_model_file)

        logger.info(f"Model weights saved in {output_model_file}")

        if push_to_hub:
            url = self._push_to_hub(repo, commit_message=commit_message)
            logger.info(f"Model pushed to the hub in this commit: {url}")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
        r"""
        Instantiate a pretrained pytorch model from a pre-trained model configuration.

        The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated). To
        train the model, you should first set it back in training mode with ``model.train()``.

        The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.

        The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        Parameters:
            pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`, `optional`):
                Can be either:

                    - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
                      a user or organization name, like ``dbmdz/bert-base-german-cased``.
                    - A path to a `directory` containing model weights saved using
                      :func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
                    - A path or url to a `tensorflow index checkpoint file` (e.g., ``./tf_model/model.ckpt.index``). In
                      this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
                      as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
                      a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
                    - A path or url to a model folder containing a `flax checkpoint file` in `.msgpack` format (e.g.,
                      ``./flax_model/`` containing ``flax_model.msgpack``). In this case, ``from_flax`` should be set
                      to :obj:`True`.
                    - :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
                      arguments ``config`` and ``state_dict``).
            model_args (sequence of positional arguments, `optional`):
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
            config (:obj:`Union[PretrainedConfig, str, os.PathLike]`, `optional`):
                Can be either:

                    - an instance of a class derived from :class:`~transformers.PretrainedConfig`,
                    - a string or path valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.

                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the `model id` string of a pretrained
                      model).
                    - The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
                      by supplying the save directory.
                    - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
                      configuration JSON file named `config.json` is found in the directory.
            state_dict (:obj:`Dict[str, torch.Tensor]`, `optional`):
                A state dictionary to use instead of a state dictionary loaded from saved weights file.

                This option can be used if you want to create a model from a pretrained configuration but load your own
                weights. In this case though, you should check if using
                :func:`~transformers.PreTrainedModel.save_pretrained` and
                :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
            cache_dir (:obj:`Union[str, os.PathLike]`, `optional`):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Load the model weights from a TensorFlow checkpoint save file (see docstring of
                ``pretrained_model_name_or_path`` argument).
            from_flax (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Load the model weights from a Flax checkpoint save file (see docstring of
                ``pretrained_model_name_or_path`` argument).
            force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to only look at local files (i.e., do not try to download the model).
            use_auth_token (:obj:`str` or `bool`, `optional`):
                The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
                generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
            revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
                identifier allowed by git.
            mirror(:obj:`str`, `optional`):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
                Please refer to the mirror site for more information.
            _fast_init(:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to disable fast initialization.

                .. warning::

                    One should only disable `_fast_init` to ensure backwards compatibility with
                    ``transformers.__version__ < 4.6.0`` for seeded model initialization. This argument will be removed
                    at the next major version. See `pull request 11471
                    <https://github.com/huggingface/transformers/pull/11471>`__ for more information.

            kwargs (remaining dictionary of keyword arguments, `optional`):
                Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
                :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
                automatically loaded:

                    - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
                      underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
                      initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
                      ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
                      with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
                      attribute will be passed to the underlying model's ``__init__`` function.

1018
1019
1020
1021
        .. note::

            Passing :obj:`use_auth_token=True` is required when you want to use a private model.

1022
1023
1024
1025
1026
1027
        .. note::

            Activate the special `"offline-mode"
            <https://huggingface.co/transformers/installation.html#offline-mode>`__ to use this method in a firewalled
            environment.

1028
        Examples::
thomwolf's avatar
thomwolf committed
1029

1030
            >>> from transformers import BertConfig, BertModel
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = BertModel.from_pretrained('bert-base-uncased')
            >>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
            >>> model = BertModel.from_pretrained('./test/saved_model/')
            >>> # Update configuration during loading.
            >>> model = BertModel.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> assert model.config.output_attentions == True
            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
            >>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
            >>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
            >>> # Loading from a Flax checkpoint file instead of a PyTorch model (slower)
            >>> model = BertModel.from_pretrained('bert-base-uncased', from_flax=True)
        """
        config = kwargs.pop("config", None)
        state_dict = kwargs.pop("state_dict", None)
        cache_dir = kwargs.pop("cache_dir", None)
        from_tf = kwargs.pop("from_tf", False)
        from_flax = kwargs.pop("from_flax", False)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        output_loading_info = kwargs.pop("output_loading_info", False)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        mirror = kwargs.pop("mirror", None)
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)
        _fast_init = kwargs.pop("_fast_init", True)

        user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                *model_args,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                _from_auto=from_auto_class,
                _from_pipeline=from_pipeline,
                **kwargs,
            )
        else:
            model_kwargs = kwargs

        # Load model
        if pretrained_model_name_or_path is not None:
            pretrained_model_name_or_path = str(pretrained_model_name_or_path)
            if os.path.isdir(pretrained_model_name_or_path):
                if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
                    # Load from a TF 1.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
                elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
                    # Load from a TF 2.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
                elif from_flax and os.path.isfile(os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)):
                    # Load from a Flax checkpoint in priority if from_flax
                    archive_file = os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                    # Load from a PyTorch checkpoint
                    archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                else:
                    raise EnvironmentError(
                        f"Error: no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + '.index', FLAX_WEIGHTS_NAME]} found in "
                        f"directory {pretrained_model_name_or_path}, or `from_tf` and `from_flax` are set to False."
                    )
            elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
                archive_file = pretrained_model_name_or_path
            elif os.path.isfile(pretrained_model_name_or_path + ".index"):
                if not from_tf:
                    raise ValueError(
                        f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set "
                        "from_tf to True to load from this checkpoint."
                    )
                archive_file = pretrained_model_name_or_path + ".index"
            else:
                # set correct filename
                if from_tf:
                    filename = TF2_WEIGHTS_NAME
                elif from_flax:
                    filename = FLAX_WEIGHTS_NAME
                else:
                    filename = WEIGHTS_NAME

                archive_file = hf_bucket_url(
                    pretrained_model_name_or_path,
                    filename=filename,
                    revision=revision,
                    mirror=mirror,
                )

            try:
                # Load from URL or cache if already cached
                resolved_archive_file = cached_path(
                    archive_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                )
            except EnvironmentError as err:
                logger.error(err)
                msg = (
                    f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
                    f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
                    f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}, {FLAX_WEIGHTS_NAME}.\n\n"
                )
                raise EnvironmentError(msg)

            if resolved_archive_file == archive_file:
                logger.info(f"loading weights file {archive_file}")
            else:
                logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
        else:
            resolved_archive_file = None

        config.name_or_path = pretrained_model_name_or_path

        # Instantiate model.
        if is_deepspeed_zero3_enabled():
            import deepspeed

            logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
            # this immediately partitions the model across all gpus, to avoid the overhead in time
            # and memory of first copying it to CPU or to each GPU
            with deepspeed.zero.Init(config=deepspeed_config()):
                with no_init_weights(_enable=_fast_init):
                    model = cls(config, *model_args, **model_kwargs)
        else:
            with no_init_weights(_enable=_fast_init):
                model = cls(config, *model_args, **model_kwargs)

        if from_tf:
            if resolved_archive_file.endswith(".index"):
                # Load from a TensorFlow 1.X checkpoint - provided by original authors
                model = cls.load_tf_weights(model, config, resolved_archive_file[:-6])  # Remove the '.index'
            else:
                # Load from our TensorFlow 2.0 checkpoints
                try:
                    from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model

                    model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
                except ImportError:
                    logger.error(
                        "Loading a TensorFlow model in PyTorch requires both PyTorch and TensorFlow to be installed. Please see "
                        "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
                    )
                    raise
        elif from_flax:
            try:
                from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model

                model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file)
            except ImportError:
                logger.error(
                    "Loading a Flax model in PyTorch requires both PyTorch and Flax to be installed. Please see "
                    "https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions."
                )
                raise
        else:
            if state_dict is None:
                try:
                    state_dict = torch.load(resolved_archive_file, map_location="cpu")
                except Exception:
                    raise OSError(
                        f"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' "
                        f"at '{resolved_archive_file}'. "
                        "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True."
                    )

            model, missing_keys, unexpected_keys, error_msgs = cls._load_state_dict_into_model(
                model, state_dict, pretrained_model_name_or_path, _fast_init=_fast_init
            )

        # make sure token embedding weights are still tied if needed
        model.tie_weights()

        # Set model in evaluation mode to deactivate DropOut modules by default
        model.eval()

        if output_loading_info:
            loading_info = {
                "missing_keys": missing_keys,
                "unexpected_keys": unexpected_keys,
                "error_msgs": error_msgs,
            }
            return model, loading_info

        return model

    @classmethod
    def _load_state_dict_into_model(cls, model, state_dict, pretrained_model_name_or_path, _fast_init=True):

        # Convert old format to new format if needed from a PyTorch state_dict
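        # (old checkpoints ported from the original TensorFlow implementation used "gamma"/"beta" as the
        # LayerNorm weight/bias parameter names)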
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if "gamma" in key:
                new_key = key.replace("gamma", "weight")
            if "beta" in key:
                new_key = key.replace("beta", "bias")
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)

        # Retrieve missing & unexpected_keys
        expected_keys = list(model.state_dict().keys())
        loaded_keys = list(state_dict.keys())
        prefix = model.base_model_prefix

        has_prefix_module = any(s.startswith(prefix) for s in loaded_keys)
        expects_prefix_module = any(s.startswith(prefix) for s in expected_keys)

        # key re-naming operations are never done on the keys
        # that are loaded, but always on the keys of the newly initialized model
        remove_prefix = not has_prefix_module and expects_prefix_module
        add_prefix = has_prefix_module and not expects_prefix_module
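        # e.g. a checkpoint saved from a bare `BertModel` has keys like "encoder.layer.0...", while a derived
        # `BertForSequenceClassification` expects "bert.encoder.layer.0...": `remove_prefix` then strips the
        # base-model prefix from the expected keys so that the two key sets can be compared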

        if remove_prefix:
            expected_keys = [".".join(s.split(".")[1:]) if s.startswith(prefix) else s for s in expected_keys]
        elif add_prefix:
            expected_keys = [".".join([prefix, s]) for s in expected_keys]

        missing_keys = list(set(expected_keys) - set(loaded_keys))
        unexpected_keys = list(set(loaded_keys) - set(expected_keys))

        # Some models may have keys that are not in the state by design, removing them before needlessly warning
        # the user.
        if cls._keys_to_ignore_on_load_missing is not None:
            for pat in cls._keys_to_ignore_on_load_missing:
                missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

        if cls._keys_to_ignore_on_load_unexpected is not None:
            for pat in cls._keys_to_ignore_on_load_unexpected:
                unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

        if _fast_init:
            # retrieve uninitialized modules and initialize them
            uninitialized_modules = model.retrieve_modules_from_names(
                missing_keys, add_prefix=add_prefix, remove_prefix=remove_prefix
            )
            for module in uninitialized_modules:
                model._init_weights(module)

        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, "_metadata", None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        error_msgs = []

        # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
        # so we need to apply the function recursively.
        def load(module: nn.Module, prefix=""):
            local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
            args = (state_dict, prefix, local_metadata, True, [], [], error_msgs)
            if is_deepspeed_zero3_enabled():
                import deepspeed

                # because zero3 puts placeholders in model params, this context
                # manager gathers (unpartitions) the params of the current layer, then loads from
                # the state dict and then re-partitions them again
                with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
                    if torch.distributed.get_rank() == 0:
                        module._load_from_state_dict(*args)
            else:
                module._load_from_state_dict(*args)

            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + ".")

        # Make sure we are able to load base models as well as derived models (with heads)
        start_prefix = ""
        model_to_load = model
        if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
            start_prefix = cls.base_model_prefix + "."
        if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
            model_to_load = getattr(model, cls.base_model_prefix)

        load(model_to_load, prefix=start_prefix)

        if len(unexpected_keys) > 0:
            logger.warning(
                f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
                f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
                f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
                f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
                f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
                f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
            )
        else:
            logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
        if len(missing_keys) > 0:
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized: {missing_keys}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        else:
            logger.info(
                f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
                f"If your task is similar to the task the model of the checkpoint was trained on, "
                f"you can already use {model.__class__.__name__} for predictions without further training."
            )
        if len(error_msgs) > 0:
            error_msg = "\n\t".join(error_msgs)
            raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")

        return model, missing_keys, unexpected_keys, error_msgs

    def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False):
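        # map every weight name to the name of the module that owns it,
        # e.g. "encoder.layer.0.output.dense.weight" -> "encoder.layer.0.output.dense"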
        module_keys = set([".".join(key.split(".")[:-1]) for key in names])

        # torch.nn.ParameterList is a special case where two parameter keywords
        # are appended to the module name, *e.g.* bert.special_embeddings.0
        module_keys = module_keys.union(set([".".join(key.split(".")[:-2]) for key in names if key[-1].isdigit()]))

        retrieved_modules = []
        # retrieve all modules that have at least one missing weight name
        for name, module in self.named_modules():
            if remove_prefix:
                name = ".".join(name.split(".")[1:]) if name.startswith(self.base_model_prefix) else name
            elif add_prefix:
                name = ".".join([self.base_model_prefix, name]) if len(name) > 0 else self.base_model_prefix

            if name in module_keys:
                retrieved_modules.append(module)

        return retrieved_modules


class Conv1D(nn.Module):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (:obj:`int`): The number of output features.
        nx (:obj:`int`): The number of input features.
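
    For illustration, a minimal usage sketch (the sizes below are arbitrary assumptions)::

        >>> import torch
        >>> layer = Conv1D(nf=12, nx=4)
        >>> x = torch.rand(2, 3, 4)  # (batch, seq_len, nx)
        >>> layer(x).shape
        torch.Size([2, 3, 12])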
    """

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = nn.Parameter(w)
        self.bias = nn.Parameter(torch.zeros(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(*size_out)
        return x


class PoolerStartLogits(nn.Module):
    """
    Compute SQuAD start logits from sequence hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, 1)

    def forward(
        self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
                token should be masked.

        Returns:
            :obj:`torch.FloatTensor`: The start logits for SQuAD.
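
        For illustration, a minimal usage sketch (the sizes below, and building the config from bare
        :class:`~transformers.PretrainedConfig` keyword arguments, are assumptions for this example)::

            >>> import torch
            >>> from transformers import PretrainedConfig
            >>> config = PretrainedConfig(hidden_size=8)
            >>> pooler = PoolerStartLogits(config)
            >>> hidden_states = torch.rand(2, 5, 8)
            >>> pooler(hidden_states).shape
            torch.Size([2, 5])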
        """
        x = self.dense(hidden_states).squeeze(-1)

        if p_mask is not None:
            if get_parameter_dtype(self) == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask

        return x


class PoolerEndLogits(nn.Module):
    """
    Compute SQuAD end logits from sequence hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
            :obj:`layer_norm_eps` to use.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dense_1 = nn.Linear(config.hidden_size, 1)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_states: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        p_mask: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
                The hidden states of the first tokens for the labeled span.
            start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                The position of the first token for the labeled span.
            p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
                token should be masked.

        .. note::

            One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
            ``start_positions`` overrides ``start_states``.

        Returns:
            :obj:`torch.FloatTensor`: The end logits for SQuAD.
        """
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
        if start_positions is not None:
            slen, hsz = hidden_states.shape[-2:]
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions)  # shape (bsz, 1, hsz)
            start_states = start_states.expand(-1, slen, -1)  # shape (bsz, slen, hsz)

        x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
        x = self.activation(x)
        x = self.LayerNorm(x)
        x = self.dense_1(x).squeeze(-1)

        if p_mask is not None:
            if get_parameter_dtype(self) == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask

        return x


class PoolerAnswerClass(nn.Module):
    """
    Compute SQuAD 2.0 answer class from classification and start tokens hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
    """

    def __init__(self, config):
        super().__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_states: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        cls_index: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
                The hidden states of the first tokens for the labeled span.
            start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                The position of the first token for the labeled span.
            cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.

        .. note::

            One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
            ``start_positions`` overrides ``start_states``.

        Returns:
            :obj:`torch.FloatTensor`: The SQuAD 2.0 answer class.
        """
        # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample.
        hsz = hidden_states.shape[-1]
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
        if start_positions is not None:
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions).squeeze(-2)  # shape (bsz, hsz)

        if cls_index is not None:
            cls_index = cls_index[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, hsz)
        else:
            cls_token_state = hidden_states[:, -1, :]  # shape (bsz, hsz)

        x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
        x = self.activation(x)
        x = self.dense_1(x).squeeze(-1)

        return x


@dataclass
class SquadHeadOutput(ModelOutput):
    """
    Base class for outputs of question answering models using a :class:`~transformers.modeling_utils.SQuADHead`.

    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
1587
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification
            losses.
        start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top config.start_n_top start token possibilities (beam-search).
        start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top config.start_n_top start token possibilities (beam-search).
        end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities
            (beam-search).
        end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the ``is_impossible`` label of the answers.

    """

    loss: Optional[torch.FloatTensor] = None
    start_top_log_probs: Optional[torch.FloatTensor] = None
    start_top_index: Optional[torch.LongTensor] = None
    end_top_log_probs: Optional[torch.FloatTensor] = None
    end_top_index: Optional[torch.LongTensor] = None
    cls_logits: Optional[torch.FloatTensor] = None


class SQuADHead(nn.Module):
    r"""
    A SQuAD head inspired by XLNet.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
            :obj:`layer_norm_eps` to use.
    """

    def __init__(self, config):
        super().__init__()
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top

        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)

    @replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig)
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        cls_index: Optional[torch.LongTensor] = None,
        is_impossible: Optional[torch.LongTensor] = None,
        p_mask: Optional[torch.FloatTensor] = None,
        return_dict: bool = False,
    ) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                Final hidden states of the model on the sequence tokens.
            start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Positions of the first token for the labeled span.
            end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Positions of the last token for the labeled span.
            cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.
            is_impossible (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Whether the question has a possible answer in the paragraph or not.
            p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
                token should be masked.
            return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.

        Returns:
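
        For illustration, a minimal inference sketch (the config attributes below are assumptions chosen for this
        example; :class:`~transformers.PretrainedConfig` stores unknown keyword arguments as attributes)::

            >>> import torch
            >>> from transformers import PretrainedConfig
            >>> config = PretrainedConfig(hidden_size=8, layer_norm_eps=1e-12, start_n_top=2, end_n_top=2)
            >>> head = SQuADHead(config)
            >>> hidden_states = torch.rand(2, 7, 8)
            >>> outputs = head(hidden_states, return_dict=True)
            >>> outputs.start_top_index.shape
            torch.Size([2, 2])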
        """
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)

            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)

                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5

            return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,)

        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            start_log_probs = nn.functional.softmax(start_logits, dim=-1)  # shape (bsz, slen)

            start_top_log_probs, start_top_index = torch.topk(
                start_log_probs, self.start_n_top, dim=-1
            )  # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz)  # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp)  # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1)  # shape (bsz, slen, start_n_top, hsz)

            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
                start_states
            )  # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = nn.functional.softmax(end_logits, dim=1)  # shape (bsz, slen, start_n_top)

            end_top_log_probs, end_top_index = torch.topk(
                end_log_probs, self.end_n_top, dim=1
            )  # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)

            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)

            if not return_dict:
                return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
            else:
                return SquadHeadOutput(
                    start_top_log_probs=start_top_log_probs,
                    start_top_index=start_top_index,
                    end_top_log_probs=end_top_log_probs,
                    end_top_index=end_top_index,
                    cls_logits=cls_logits,
                )


class SequenceSummary(nn.Module):
    r"""
    Compute a single vector summary of a sequence hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
            config class of your model for the default values it uses):

            - **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:

                - :obj:`"last"` -- Take the last token hidden state (like XLNet)
                - :obj:`"first"` -- Take the first token hidden state (like Bert)
                - :obj:`"mean"` -- Take the mean of all tokens hidden states
                - :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
                - :obj:`"attn"` -- Not implemented now, use multi-head attention

            - **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
            - **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
              :obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
            - **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
              output, another string or :obj:`None` will add no activation.
            - **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
              activation.
            - **summary_last_dropout** (:obj:`float`)-- Optional dropout probability after the projection and
              activation.
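
    For illustration, a minimal usage sketch (the config attributes below are assumptions chosen for this
    example; :class:`~transformers.PretrainedConfig` stores unknown keyword arguments as attributes)::

        >>> import torch
        >>> from transformers import PretrainedConfig
        >>> config = PretrainedConfig(summary_type="mean", summary_use_proj=False)
        >>> summary = SequenceSummary(config)
        >>> hidden_states = torch.rand(2, 5, 8)
        >>> summary(hidden_states).shape
        torch.Size([2, 8])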
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()

        self.summary_type = getattr(config, "summary_type", "last")
        if self.summary_type == "attn":
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

        self.summary = Identity()
        if hasattr(config, "summary_use_proj") and config.summary_use_proj:
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = nn.Linear(config.hidden_size, num_classes)

        activation_string = getattr(config, "summary_activation", None)
        self.activation: Callable = get_activation(activation_string) if activation_string else Identity()

        self.first_dropout = Identity()
        if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
            self.first_dropout = nn.Dropout(config.summary_first_dropout)

        self.last_dropout = Identity()
        if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
            self.last_dropout = nn.Dropout(config.summary_last_dropout)

    def forward(
        self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
    ) -> torch.FloatTensor:
        """
        Compute a single vector summary of a sequence hidden states.

        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`[batch_size, seq_len, hidden_size]`):
                The hidden states of the last layer.
            cls_index (:obj:`torch.LongTensor` of shape :obj:`[batch_size]` or :obj:`[batch_size, ...]` where ... are optional leading dimensions of :obj:`hidden_states`, `optional`):
                Used if :obj:`summary_type == "cls_index"` and takes the last token of the sequence as classification
                token.

        Returns:
            :obj:`torch.FloatTensor`: The summary of the sequence hidden states.
        """
        if self.summary_type == "last":
            output = hidden_states[:, -1]
        elif self.summary_type == "first":
            output = hidden_states[:, 0]
        elif self.summary_type == "mean":
            output = hidden_states.mean(dim=1)
        elif self.summary_type == "cls_index":
            if cls_index is None:
                cls_index = torch.full_like(
                    hidden_states[..., :1, :],
                    hidden_states.shape[-2] - 1,
                    dtype=torch.long,
                )
            else:
                cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
                cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
            # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
            output = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, XX, hidden_size)
        elif self.summary_type == "attn":
            raise NotImplementedError

        output = self.first_dropout(output)
        output = self.summary(output)
        output = self.activation(output)
        output = self.last_dropout(output)

        return output


def unwrap_model(model: nn.Module) -> nn.Module:
    """
    Recursively unwraps a model from potential containers (as used in distributed training).

    Args:
        model (:obj:`torch.nn.Module`): The model to unwrap.
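
    For illustration, a minimal sketch with a :obj:`torch.nn.DataParallel` wrapper::

        >>> import torch
        >>> wrapped = torch.nn.DataParallel(torch.nn.Linear(2, 2))
        >>> unwrap_model(wrapped) is wrapped.module
        True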
    """
    # since there could be multiple levels of wrapping, unwrap recursively
    if hasattr(model, "module"):
        return unwrap_model(model.module)
    else:
        return model


def prune_linear_layer(layer: nn.Linear, index: torch.LongTensor, dim: int = 0) -> nn.Linear:
    """
    Prune a linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (:obj:`torch.nn.Linear`): The layer to prune.
        index (:obj:`torch.LongTensor`): The indices to keep in the layer.
        dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.

    Returns:
        :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
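
    For illustration, a minimal sketch keeping three of six output rows (the sizes are arbitrary assumptions)::

        >>> import torch
        >>> from torch import nn
        >>> layer = nn.Linear(4, 6)
        >>> pruned = prune_linear_layer(layer, torch.tensor([0, 2, 3]), dim=0)
        >>> pruned.weight.shape
        torch.Size([3, 4])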
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if layer.bias is not None:
        if dim == 1:
            b = layer.bias.clone().detach()
        else:
            b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    if layer.bias is not None:
        new_layer.bias.requires_grad = False
        new_layer.bias.copy_(b.contiguous())
        new_layer.bias.requires_grad = True
    return new_layer


def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
    """
    Prune a Conv1D layer to keep only entries in index. A Conv1D works like a linear layer (see e.g. BERT) but the
    weights are transposed.

    Used to remove heads.

    Args:
        layer (:class:`~transformers.modeling_utils.Conv1D`): The layer to prune.
        index (:obj:`torch.LongTensor`): The indices to keep in the layer.
        dim (:obj:`int`, `optional`, defaults to 1): The dimension on which to keep the indices.

    Returns:
        :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with :obj:`requires_grad=True`.
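
    For illustration, a minimal sketch keeping two of six output features (the sizes are arbitrary assumptions)::

        >>> import torch
        >>> layer = Conv1D(nf=6, nx=4)  # weight has shape (nx, nf) = (4, 6)
        >>> pruned = prune_conv1d_layer(layer, torch.tensor([1, 3]), dim=1)
        >>> pruned.weight.shape
        torch.Size([4, 2])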
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if dim == 0:
        b = layer.bias.clone().detach()
    else:
        b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    new_layer.bias.requires_grad = False
    new_layer.bias.copy_(b.contiguous())
    new_layer.bias.requires_grad = True
    return new_layer


def prune_layer(
    layer: Union[nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
) -> Union[nn.Linear, Conv1D]:
    """
    Prune a Conv1D or linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (:obj:`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
        index (:obj:`torch.LongTensor`): The indices to keep in the layer.
        dim (:obj:`int`, `optional`): The dimension on which to keep the indices.

    Returns:
        :obj:`torch.nn.Linear` or :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with
        :obj:`requires_grad=True`.
    """
    if isinstance(layer, nn.Linear):
        return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
    elif isinstance(layer, Conv1D):
        return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
    else:
        raise ValueError(f"Can't prune layer of class {layer.__class__}")


def apply_chunking_to_forward(
    forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
    """
    This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the
    dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory.

    If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as
    directly applying :obj:`forward_fn` to :obj:`input_tensors`.

    Args:
        forward_fn (:obj:`Callable[..., torch.Tensor]`):
            The forward function of the model.
        chunk_size (:obj:`int`):
            The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`.
        chunk_dim (:obj:`int`):
            The dimension over which the :obj:`input_tensors` should be chunked.
        input_tensors (:obj:`Tuple[torch.Tensor]`):
            The input tensors of ``forward_fn`` which will be chunked.

    Returns:
        :obj:`torch.Tensor`: A tensor with the same shape as the one :obj:`forward_fn` would have given if applied directly.


    Examples::

        # rename the usual forward() fn to forward_chunk()
        def forward_chunk(self, hidden_states):
            hidden_states = self.decoder(hidden_states)
            return hidden_states

        # implement a chunked forward function
        def forward(self, hidden_states):
            return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
    """

    assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors"
    tensor_shape = input_tensors[0].shape[chunk_dim]
    assert all(
        input_tensor.shape[chunk_dim] == tensor_shape for input_tensor in input_tensors
    ), "All input tenors have to be of the same shape"

    # inspect.signature exists since python 3.5 and is a python method -> no problem with backward compatibility
    num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
    if num_args_in_forward_chunk_fn != len(input_tensors):
        raise ValueError(
            f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input "
            "tensors are given"
        )

    if chunk_size > 0:
        if input_tensors[0].shape[chunk_dim] % chunk_size != 0:
            raise ValueError(
                f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk "
                f"size {chunk_size}"
            )

        num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size

        # chunk input tensor into tuples
        input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
        # apply forward fn to every tuple
        output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
        # concatenate output at same dimension
        return torch.cat(output_chunks, dim=chunk_dim)

    return forward_fn(*input_tensors)