# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import os
import re
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union

import torch
from torch import Tensor, device, dtype, nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F

from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .file_utils import (
    DUMMY_INPUTS,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    WEIGHTS_NAME,
    ModelOutput,
    cached_path,
    hf_bucket_url,
    is_remote_url,
    is_torch_tpu_available,
    replace_return_docstrings,
)
from .generation_utils import GenerationMixin
from .utils import logging


logger = logging.get_logger(__name__)

try:
    from torch.nn import Identity
except ImportError:
    # Older PyTorch compatibility
    class Identity(nn.Module):
        r"""A placeholder identity operator that is argument-insensitive."""

        def __init__(self, *args, **kwargs):
            super().__init__()

        def forward(self, input):
            return input

def find_pruneable_heads_and_indices(
    heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
) -> Tuple[Set[int], torch.LongTensor]:
    """
    Finds the heads and their indices taking :obj:`already_pruned_heads` into account.

    Args:
        heads (:obj:`List[int]`): List of the indices of heads to prune.
        n_heads (:obj:`int`): The number of heads in the model.
        head_size (:obj:`int`): The size of each head.
        already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.

    Returns:
        :obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
    """
    mask = torch.ones(n_heads, head_size)
    heads = set(heads) - already_pruned_heads  # Convert to set and remove already pruned heads
    for head in heads:
        # Compute how many pruned heads are before the head and move the index accordingly
        head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
        mask[head] = 0
    mask = mask.view(-1).contiguous().eq(1)
    index: torch.LongTensor = torch.arange(len(mask))[mask].long()
    return heads, index
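
# Illustrative usage sketch (not part of the library): pruning heads 0 and 2 of a layer with 12 heads of
# size 64 and no previously pruned heads would look like
#
#   heads, index = find_pruneable_heads_and_indices(
#       heads=[0, 2], n_heads=12, head_size=64, already_pruned_heads=set()
#   )
#   # heads == {0, 2}; index selects the 10 * 64 = 640 columns that survive the pruning.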


class ModuleUtilsMixin:
    """
    A few utilities for :obj:`torch.nn.Modules`, to be used as a mixin.
    """

    @staticmethod
    def _hook_rss_memory_pre_forward(module, *args, **kwargs):
        try:
            import psutil
        except (ImportError):
            raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")

        process = psutil.Process(os.getpid())
        mem = process.memory_info()
        module.mem_rss_pre_forward = mem.rss
        return None

    @staticmethod
    def _hook_rss_memory_post_forward(module, *args, **kwargs):
        try:
            import psutil
        except (ImportError):
            raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")

        process = psutil.Process(os.getpid())
        mem = process.memory_info()
        module.mem_rss_post_forward = mem.rss
        mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
        module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
        return None

    def add_memory_hooks(self):
        """
        Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.

        Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to
        zero with :obj:`model.reset_memory_hooks_state()`.
        """
        for module in self.modules():
            module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
            module.register_forward_hook(self._hook_rss_memory_post_forward)
        self.reset_memory_hooks_state()

    def reset_memory_hooks_state(self):
        """
        Reset the :obj:`mem_rss_diff` attribute of each module (see
        :func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`).
        """
        for module in self.modules():
            module.mem_rss_diff = 0
            module.mem_rss_post_forward = 0
            module.mem_rss_pre_forward = 0

    @property
    def device(self) -> device:
        """
        :obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same
        device).
        """
        try:
            return next(self.parameters()).device
        except StopIteration:
            # For nn.DataParallel compatibility in PyTorch 1.5

            def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
                tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
                return tuples

            gen = self._named_members(get_members_fn=find_tensor_attributes)
            first_tuple = next(gen)
            return first_tuple[1].device
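
    # Usage sketch (illustrative): ``device`` is convenient for moving freshly built inputs onto whatever
    # device the model parameters live on, e.g.
    #
    #   inputs = {name: tensor.to(model.device) for name, tensor in inputs.items()}
    #   outputs = model(**inputs)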
    @property
    def dtype(self) -> dtype:
        """
        :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
        """
        try:
            return next(self.parameters()).dtype
        except StopIteration:
            # For nn.DataParallel compatibility in PyTorch 1.5

            def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
                tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
                return tuples

            gen = self._named_members(get_members_fn=find_tensor_attributes)
            first_tuple = next(gen)
            return first_tuple[1].dtype

    def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
        """
        Invert an attention mask (e.g., switches 0. and 1.).

        Args:
            encoder_attention_mask (:obj:`torch.Tensor`): An attention mask.

        Returns:
            :obj:`torch.Tensor`: The inverted attention mask.
        """
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
        # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
        # /transformer/transformer_layers.py#L270
        # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
        # encoder_extended_attention_mask.transpose(-1, -2))
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility

        if self.dtype == torch.float16:
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
        elif self.dtype == torch.float32:
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
        else:
            raise ValueError(
                "{} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`".format(
                    self.dtype
                )
            )

        return encoder_extended_attention_mask
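
    # Worked sketch (illustrative): for a float32 model, a 2D padding mask ``[[1, 1, 0]]`` becomes an
    # additive mask of shape (1, 1, 1, 3):
    #
    #   mask = torch.tensor([[1, 1, 0]])
    #   inverted = model.invert_attention_mask(mask)
    #   # inverted holds 0.0 for the two attended positions and -1e9 for the masked one.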

    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device: (:obj:`torch.device`):
                The device of the input to the model.

        Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if self.config.is_decoder:
                batch_size, seq_length = input_shape
                seq_ids = torch.arange(seq_length, device=device)
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)

                if causal_mask.shape[1] < attention_mask.shape[1]:
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones(
                                (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
                            ),
                            causal_mask,
                        ],
                        axis=-1,
                    )

                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format(
                    input_shape, attention_mask.shape
                )
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask
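
    # Worked sketch (illustrative): for an encoder (``config.is_decoder == False``), a 2D padding mask of
    # shape (batch_size, seq_length) is broadcast to (batch_size, 1, 1, seq_length) and turned into an
    # additive mask, e.g.
    #
    #   attention_mask = torch.tensor([[1, 1, 0]])
    #   extended = model.get_extended_attention_mask(attention_mask, attention_mask.shape, model.device)
    #   # extended holds 0.0 for attended positions and -10000.0 for the masked one; it is added to the
    #   # raw attention scores before the softmax.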

    def get_head_mask(
        self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
    ) -> Tensor:
        """
        Prepare the head mask if needed.

        Args:
            head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`):
                The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
            num_hidden_layers (:obj:`int`):
                The number of hidden layers in the model.
            is_attention_chunked (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the attention scores are computed by chunks.

        Returns:
            :obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or
            list with :obj:`[None]` for each layer.
        """
        if head_mask is not None:
            head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
            if is_attention_chunked is True:
                head_mask = head_mask.unsqueeze(-1)
        else:
            head_mask = [None] * num_hidden_layers

        return head_mask
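
    # Usage sketch (illustrative): a 1D mask of shape [num_heads] is broadcast to every layer, so for a
    # 12-layer, 12-head model
    #
    #   head_mask = model.get_head_mask(torch.ones(12), num_hidden_layers=12)
    #   # head_mask.shape == (12, 1, 12, 1, 1); passing ``None`` instead returns ``[None] * 12``.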

    def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
        """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
        if head_mask.dim() == 1:
            head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
        elif head_mask.dim() == 2:
            head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
        assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
        head_mask = head_mask.to(dtype=self.dtype)  # switch to float if needed + fp16 compatibility
        return head_mask

    def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
        """
        Get number of (optionally, trainable or non-embeddings) parameters in the module.

        Args:
            only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return only the number of trainable parameters

            exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return only the number of non-embeddings parameters

        Returns:
            :obj:`int`: The number of parameters.
        """

        def parameter_filter(x):
            return (x.requires_grad or not only_trainable) and not (
                isinstance(x, torch.nn.Embedding) and exclude_embeddings
            )

        params = filter(parameter_filter, self.parameters()) if only_trainable else self.parameters()
        return sum(p.numel() for p in params)
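
    # Usage sketch (illustrative):
    #
    #   total = model.num_parameters()
    #   trainable = model.num_parameters(only_trainable=True)
    #   # The two numbers match for a fully trainable model; freezing parameters lowers the second one.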

    def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int:
        """
        Helper function to estimate the total number of tokens from the model inputs.

        Args:
            input_dict (:obj:`dict`): The model inputs.

        Returns:
            :obj:`int`: The total number of tokens.
        """
        token_inputs = [tensor for key, tensor in input_dict.items() if "input" in key]
        if token_inputs:
            return sum([token_input.numel() for token_input in token_inputs])
        else:
            warnings.warn(
                "Could not estimate the number of tokens of the input, floating-point operations will not be computed"
            )
            return 0

    def floating_point_ops(
        self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True
    ) -> int:
        """
        Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a
        batch with this transformer model. Default approximation neglects the quadratic dependency on the number of
        tokens (valid if :obj:`12 * d_model << sequence_length`) as laid out in `this paper
        <https://arxiv.org/pdf/2001.08361.pdf>`__ section 2.1. Should be overridden for transformers with parameter
        re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.

        Args:
            input_dict (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The model inputs.

            exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to count embedding and softmax operations.

        Returns:
            :obj:`int`: The number of floating-point operations.
        """

        return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)
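
    # Worked example of the approximation above: a batch of 8 sequences of 128 tokens through a
    # 110M-parameter model costs roughly 6 * (8 * 128) * 110e6 ≈ 6.8e11 floating-point operations for the
    # combined forward and backward passes.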

class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin):
    r"""
    Base class for all models.

    :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods
    for loading, downloading and saving models as well as a few methods common to all models to:

        * resize the input embeddings,
        * prune heads in the self-attention heads.

    Class attributes (overridden by derived classes):

        - **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
          :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
        - **load_tf_weights** (:obj:`Callable`) -- A python `method` for loading a TensorFlow checkpoint in a PyTorch
          model, taking as arguments:

            - **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the
              TensorFlow checkpoint.
            - **config** (:class:`~transformers.PreTrainedConfig`) -- An instance of the configuration associated to
              the model.
            - **path** (:obj:`str`) -- A path to the TensorFlow checkpoint.

        - **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
          derived classes of the same architecture adding modules on top of the base model.
        - **is_parallelizable** (:obj:`bool`) -- A flag indicating whether this model supports model parallelization.
    """
    config_class = None
    base_model_prefix = ""
    # a list of re patterns of tensor names to ignore from the model when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_missing = None
    # a list of re patterns of tensor names to ignore from the weights when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_unexpected = None
    # a list of tensor names to ignore when saving the model (useful for keys that aren't
    # trained, but which are deterministic)
    _keys_to_ignore_on_save = None

    is_parallelizable = False

    @property
    def dummy_inputs(self) -> Dict[str, torch.Tensor]:
        """
        :obj:`Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
        """
        return {"input_ids": torch.tensor(DUMMY_INPUTS)}

    def __init__(self, config: PretrainedConfig, *inputs, **kwargs):
        super().__init__()
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
                "To create a model from a pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                )
            )
        # Save config and origin of the pretrained weights if given in model
        self.config = config
        self.name_or_path = config.name_or_path

    @property
    def base_model(self) -> nn.Module:
        """
        :obj:`torch.nn.Module`: The main body of the model.
        """
        return getattr(self, self.base_model_prefix, self)

    def get_input_embeddings(self) -> nn.Module:
        """
        Returns the model's input embeddings.

        Returns:
            :obj:`nn.Module`: A torch module mapping vocabulary to hidden states.
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            return base_model.get_input_embeddings()
        else:
            raise NotImplementedError

    def set_input_embeddings(self, value: nn.Module):
        """
        Set model's input embeddings.

        Args:
            value (:obj:`nn.Module`): A module mapping vocabulary to hidden states.
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            base_model.set_input_embeddings(value)
        else:
            raise NotImplementedError

    def get_output_embeddings(self) -> nn.Module:
        """
        Returns the model's output embeddings.

        Returns:
            :obj:`nn.Module`: A torch module mapping hidden states to vocabulary.
        """
        return None  # Overwrite for models with output embeddings

    def tie_weights(self):
        """
        Tie the weights between the input embeddings and the output embeddings.

        If the :obj:`torchscript` flag is set in the configuration, TorchScript can't handle parameter sharing, so we
        clone the weights instead.
        """
        output_embeddings = self.get_output_embeddings()
        if output_embeddings is not None and self.config.tie_word_embeddings:
            self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())

        if self.config.is_encoder_decoder and self.config.tie_encoder_decoder:
            if hasattr(self, self.base_model_prefix):
                self = getattr(self, self.base_model_prefix)
            self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)

    @staticmethod
    def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str):
        uninitialized_encoder_weights: List[str] = []
        if decoder.__class__ != encoder.__class__:
            logger.info(
                f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized."
            )

        def tie_encoder_to_decoder_recursively(
            decoder_pointer: nn.Module,
            encoder_pointer: nn.Module,
            module_name: str,
            uninitialized_encoder_weights: List[str],
            depth=0,
        ):
            assert isinstance(decoder_pointer, nn.Module) and isinstance(
                encoder_pointer, nn.Module
            ), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module"
            if hasattr(decoder_pointer, "weight"):
                assert hasattr(encoder_pointer, "weight")
                encoder_pointer.weight = decoder_pointer.weight
                if hasattr(decoder_pointer, "bias"):
                    assert hasattr(encoder_pointer, "bias")
                    encoder_pointer.bias = decoder_pointer.bias
                return

            encoder_modules = encoder_pointer._modules
            decoder_modules = decoder_pointer._modules
            if len(decoder_modules) > 0:
                assert (
                    len(encoder_modules) > 0
                ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"

                all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
                encoder_layer_pos = 0
                for name, module in decoder_modules.items():
                    if name.isdigit():
                        encoder_name = str(int(name) + encoder_layer_pos)
                        decoder_name = name
                        if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
                            encoder_modules
                        ) != len(decoder_modules):
                            # this can happen if the name corresponds to the position in a list module list of layers
                            # in this case the decoder has added a cross-attention that the encoder does not have
                            # thus skip this step and subtract one layer pos from encoder
                            encoder_layer_pos -= 1
                            continue
                    elif name not in encoder_modules:
                        continue
                    elif depth > 500:
                        raise ValueError(
                            "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
                        )
                    else:
                        decoder_name = encoder_name = name
                    tie_encoder_to_decoder_recursively(
                        decoder_modules[decoder_name],
                        encoder_modules[encoder_name],
                        module_name + "/" + name,
                        uninitialized_encoder_weights,
                        depth=depth + 1,
                    )
                    all_encoder_weights.remove(module_name + "/" + encoder_name)

                uninitialized_encoder_weights += list(all_encoder_weights)

        # tie weights recursively
        tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights)
        if len(uninitialized_encoder_weights) > 0:
            logger.warning(
                f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
            )

    def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
        """Tie or clone module weights depending on whether we are using TorchScript or not"""
        if self.config.torchscript:
            output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
        else:
            output_embeddings.weight = input_embeddings.weight

        if getattr(output_embeddings, "bias", None) is not None:
            output_embeddings.bias.data = torch.nn.functional.pad(
                output_embeddings.bias.data,
                (
                    0,
                    output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],
                ),
                "constant",
                0,
            )
        if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
            output_embeddings.out_features = input_embeddings.num_embeddings

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> torch.nn.Embedding:
        """
        Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.

        Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.

        Arguments:
            new_num_tokens (:obj:`int`, `optional`):
                The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
                vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
                just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model without doing
                anything.

        Return:
            :obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
        """
        model_embeds = self._resize_token_embeddings(new_num_tokens)
        if new_num_tokens is None:
            return model_embeds

        # Update base model and current model config
        self.config.vocab_size = new_num_tokens
        self.vocab_size = new_num_tokens

        # Tie weights again if needed
        self.tie_weights()

        return model_embeds
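
    # Usage sketch (illustrative, assuming a matching ``tokenizer`` object): the usual pattern after adding
    # tokens to a tokenizer is
    #
    #   tokenizer.add_tokens(["<new_token>"])
    #   model.resize_token_embeddings(len(tokenizer))
    #   # The embedding matrix now has ``len(tokenizer)`` rows; the added rows are freshly initialized.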

    def _resize_token_embeddings(self, new_num_tokens):
        old_embeddings = self.get_input_embeddings()
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.set_input_embeddings(new_embeddings)

        # if word embeddings are not tied, make sure that lm head is resized as well
        if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings:
            old_lm_head = self.get_output_embeddings()
            new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens)
            self.set_output_embeddings(new_lm_head)

        return self.get_input_embeddings()
    def _get_resized_embeddings(
        self, old_embeddings: torch.nn.Embedding, new_num_tokens: Optional[int] = None
    ) -> torch.nn.Embedding:
        """
        Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
        initialized vectors at the end. Reducing the size will remove vectors from the end

        Args:
            old_embeddings (:obj:`torch.nn.Embedding`):
                Old embeddings to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the embedding matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`torch.nn.Embedding` module of the model without doing anything.

        Return:
            :obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
            :obj:`new_num_tokens` is :obj:`None`
        """
        if new_num_tokens is None:
            return old_embeddings

        old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
        if old_num_tokens == new_num_tokens:
            return old_embeddings

        if not isinstance(old_embeddings, nn.Embedding):
            raise TypeError(
                f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}."
                f"You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Embedding}."
            )

        # Build new embeddings
        new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim).to(self.device)

        # initialize all new embeddings (in particular added tokens)
        self._init_weights(new_embeddings)

        # Copy token embeddings from the previous weights
        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
        new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]

        return new_embeddings

    def _get_resized_lm_head(
        self, old_lm_head: torch.nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False
    ) -> torch.nn.Linear:
        """
        Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized
        vectors at the end. Reducing the size will remove vectors from the end

        Args:
            old_lm_head (:obj:`torch.nn.Linear`):
                Old lm head linear layer to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`torch.nn.Linear` module of the model without doing anything.
            transposed (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether ``old_lm_head`` is transposed or not. If True ``old_lm_head.size()`` is ``lm_head_dim,
                vocab_size`` else ``vocab_size, lm_head_dim``.

        Return:
            :obj:`torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if
            :obj:`new_num_tokens` is :obj:`None`
        """
        if new_num_tokens is None:
            return old_lm_head

        old_num_tokens, old_lm_head_dim = (
            old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
        )

        if old_num_tokens == new_num_tokens:
            return old_lm_head

        if not isinstance(old_lm_head, nn.Linear):
            raise TypeError(
                f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}."
                f"You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Linear}."
            )

        # Build new lm head
        new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim)
        has_new_lm_head_bias = old_lm_head.bias is not None
        new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias).to(self.device)

        # initialize new lm head (in particular added tokens)
        self._init_weights(new_lm_head)

        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)

        # Copy old lm head weights to new lm head
        if not transposed:
            new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :]
        else:
            new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy]

        # Copy bias weights to new lm head
        if has_new_lm_head_bias:
            new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy]

        return new_lm_head

    def init_weights(self):
        """
        Initializes and prunes weights if needed.
        """
        # Initialize weights
        self.apply(self._init_weights)

        # Prune heads if needed
        if self.config.pruned_heads:
            self.prune_heads(self.config.pruned_heads)

        # Tie weights if needed
        self.tie_weights()

    def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
        """
        Prunes heads of the base model.
        Arguments:
            heads_to_prune (:obj:`Dict[int, List[int]]`):
                Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
                heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
                0 and 2 on layer 1 and heads 2 and 3 on layer 2.
        """
        # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
        for layer, heads in heads_to_prune.items():
            union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
            self.config.pruned_heads[layer] = list(union_heads)  # Unfortunately we have to store it as list for JSON

        self.base_model._prune_heads(heads_to_prune)
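
    # Usage sketch (illustrative), mirroring the docstring above: prune heads 0 and 2 on layer 1 and heads
    # 2 and 3 on layer 2 with
    #
    #   model.prune_heads({1: [0, 2], 2: [2, 3]})
    #   # The pruned heads are also recorded in ``model.config.pruned_heads``, so they are pruned again
    #   # when the model is reloaded.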

    def save_pretrained(self, save_directory: Union[str, os.PathLike]):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        :func:`~transformers.PreTrainedModel.from_pretrained` class method.

        Arguments:
            save_directory (:obj:`str` or :obj:`os.PathLike`):
                Directory to which to save. Will be created if it doesn't exist.
        """
        if os.path.isfile(save_directory):
            logger.error("Provided path ({}) should be a directory, not a file".format(save_directory))
            return
        os.makedirs(save_directory, exist_ok=True)
        # Only save the model itself if we are using distributed training
        model_to_save = self.module if hasattr(self, "module") else self
        # Attach architecture to the config
        model_to_save.config.architectures = [model_to_save.__class__.__name__]

        state_dict = model_to_save.state_dict()

        # Handle the case where some state_dict keys shouldn't be saved
        if self._keys_to_ignore_on_save is not None:
            state_dict = {k: v for k, v in state_dict.items() if k not in self._keys_to_ignore_on_save}
        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
        if getattr(self.config, "xla_device", False) and is_torch_tpu_available():
            import torch_xla.core.xla_model as xm

            if xm.is_master_ordinal():
                # Save configuration file
                model_to_save.config.save_pretrained(save_directory)
            # xm.save takes care of saving only from master
            xm.save(state_dict, output_model_file)
        else:
            model_to_save.config.save_pretrained(save_directory)
            torch.save(state_dict, output_model_file)

        logger.info("Model weights saved in {}".format(output_model_file))
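
    # Usage sketch (illustrative) of the save / load round trip, assuming a BERT-style model:
    #
    #   model.save_pretrained("./my_model_directory/")
    #   reloaded = BertModel.from_pretrained("./my_model_directory/")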
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
        r"""
        Instantiate a pretrained pytorch model from a pre-trained model configuration.
        The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated). To
        train the model, you should first set it back in training mode with ``model.train()``.
        The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.
        The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
        weights are discarded.
        Parameters:
            pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`, `optional`):
                Can be either:

                    - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
                      a user or organization name, like ``dbmdz/bert-base-german-cased``.
                    - A path to a `directory` containing model weights saved using
                      :func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
                    - A path or url to a `tensorflow index checkpoint file` (e.g., ``./tf_model/model.ckpt.index``). In
                      this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
                      as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
                      a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
                    - :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
                      arguments ``config`` and ``state_dict``).
            model_args (sequence of positional arguments, `optional`):
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
            config (:obj:`Union[PretrainedConfig, str, os.PathLike]`, `optional`):
                Can be either:

                    - an instance of a class derived from :class:`~transformers.PretrainedConfig`,
                    - a string or path valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.

                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the `model id` string of a pretrained
                      model).
                    - The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
                      by supplying the save directory.
                    - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
                      configuration JSON file named `config.json` is found in the directory.
            state_dict (:obj:`Dict[str, torch.Tensor]`, `optional`):
                A state dictionary to use instead of a state dictionary loaded from saved weights file.

                This option can be used if you want to create a model from a pretrained configuration but load your own
                weights. In this case though, you should check if using
                :func:`~transformers.PreTrainedModel.save_pretrained` and
                :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
            cache_dir (:obj:`Union[str, os.PathLike]`, `optional`):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Load the model weights from a TensorFlow checkpoint save file (see docstring of
                ``pretrained_model_name_or_path`` argument).
            force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only(:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to only look at local files (i.e., do not try to download the model).
            use_auth_token (:obj:`str` or `bool`, `optional`):
                The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
                generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
            revision(:obj:`str`, `optional`, defaults to :obj:`"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
                identifier allowed by git.
            mirror(:obj:`str`, `optional`, defaults to :obj:`None`):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
                Please refer to the mirror site for more information.
            kwargs (remaining dictionary of keyword arguments, `optional`):
                Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
                :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
                automatically loaded:

                    - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
                      underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
                      initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
                      ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
                      with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
                      attribute will be passed to the underlying model's ``__init__`` function.

        .. note::

            Passing :obj:`use_auth_token=True` is required when you want to use a private model.

        Examples::
            >>> from transformers import BertConfig, BertModel
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = BertModel.from_pretrained('bert-base-uncased')
            >>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
            >>> model = BertModel.from_pretrained('./test/saved_model/')
            >>> # Update configuration during loading.
            >>> model = BertModel.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> assert model.config.output_attentions == True
            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
            >>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
            >>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
        """
        config = kwargs.pop("config", None)
        state_dict = kwargs.pop("state_dict", None)
        cache_dir = kwargs.pop("cache_dir", None)
        from_tf = kwargs.pop("from_tf", False)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        output_loading_info = kwargs.pop("output_loading_info", False)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        mirror = kwargs.pop("mirror", None)

        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                *model_args,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                **kwargs,
            )
        else:
            model_kwargs = kwargs

thomwolf's avatar
thomwolf committed
967
        # Load model
thomwolf's avatar
thomwolf committed
968
        if pretrained_model_name_or_path is not None:
969
            pretrained_model_name_or_path = str(pretrained_model_name_or_path)
970
            if os.path.isdir(pretrained_model_name_or_path):
thomwolf's avatar
thomwolf committed
971
                if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
972
                    # Load from a TF 1.0 checkpoint in priority if from_tf
thomwolf's avatar
thomwolf committed
973
                    archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
thomwolf's avatar
thomwolf committed
974
                elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
975
                    # Load from a TF 2.0 checkpoint in priority if from_tf
thomwolf's avatar
thomwolf committed
976
977
978
                    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                    # Load from a PyTorch checkpoint
thomwolf's avatar
thomwolf committed
979
                    archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
thomwolf's avatar
thomwolf committed
980
                else:
981
982
                    raise EnvironmentError(
                        "Error no file named {} found in directory {} or `from_tf` set to False".format(
Patrick von Platen's avatar
Patrick von Platen committed
983
                            [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"],
Patrick von Platen's avatar
Patrick von Platen committed
984
                            pretrained_model_name_or_path,
985
986
                        )
                    )
987
            elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
988
                archive_file = pretrained_model_name_or_path
989
            elif os.path.isfile(pretrained_model_name_or_path + ".index"):
990
991
992
993
994
                assert (
                    from_tf
                ), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
                    pretrained_model_name_or_path + ".index"
                )
                archive_file = pretrained_model_name_or_path + ".index"
            else:
                archive_file = hf_bucket_url(
                    pretrained_model_name_or_path,
                    filename=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME),
                    revision=revision,
                    mirror=mirror,
                )

            try:
                # Load from URL or cache if already cached
                resolved_archive_file = cached_path(
                    archive_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                )
            except EnvironmentError as err:
                logger.error(err)
                msg = (
                    f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
                    f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
                    f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n"
                )
                raise EnvironmentError(msg)

            if resolved_archive_file == archive_file:
                logger.info("loading weights file {}".format(archive_file))
            else:
                logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
        else:
            resolved_archive_file = None

        config.name_or_path = pretrained_model_name_or_path

        # Instantiate model.
        model = cls(config, *model_args, **model_kwargs)

        if state_dict is None and not from_tf:
            try:
                state_dict = torch.load(resolved_archive_file, map_location="cpu")
            except Exception:
                raise OSError(
                    f"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' "
                    f"at '{resolved_archive_file}'"
                    "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
                )

        missing_keys = []
        unexpected_keys = []
        error_msgs = []

        if from_tf:
            if resolved_archive_file.endswith(".index"):
                # Load from a TensorFlow 1.X checkpoint - provided by original authors
                model = cls.load_tf_weights(model, config, resolved_archive_file[:-6])  # Remove the '.index'
            else:
                # Load from our TensorFlow 2.0 checkpoints
                try:
                    from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model

                    model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
                except ImportError:
                    logger.error(
                        "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
                        "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
                    )
                    raise
        else:
            # Convert old format to new format if needed from a PyTorch state_dict
            old_keys = []
            new_keys = []
            for key in state_dict.keys():
                new_key = None
                if "gamma" in key:
                    new_key = key.replace("gamma", "weight")
                if "beta" in key:
                    new_key = key.replace("beta", "bias")
                if new_key:
                    old_keys.append(key)
                    new_keys.append(new_key)
            for old_key, new_key in zip(old_keys, new_keys):
                state_dict[new_key] = state_dict.pop(old_key)

            # copy state_dict so _load_from_state_dict can modify it
            metadata = getattr(state_dict, "_metadata", None)
            state_dict = state_dict.copy()
            if metadata is not None:
                state_dict._metadata = metadata

            # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
            # so we need to apply the function recursively.
            def load(module: nn.Module, prefix=""):
                local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
                module._load_from_state_dict(
                    state_dict,
                    prefix,
                    local_metadata,
                    True,
                    missing_keys,
                    unexpected_keys,
                    error_msgs,
                )
                for name, child in module._modules.items():
                    if child is not None:
                        load(child, prefix + name + ".")

            # Make sure we are able to load base models as well as derived models (with heads)
            start_prefix = ""
            model_to_load = model
            has_prefix_module = any(s.startswith(cls.base_model_prefix) for s in state_dict.keys())
            if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
                start_prefix = cls.base_model_prefix + "."
            if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
                model_to_load = getattr(model, cls.base_model_prefix)

            load(model_to_load, prefix=start_prefix)

            if model.__class__.__name__ != model_to_load.__class__.__name__:
                base_model_state_dict = model_to_load.state_dict().keys()
                head_model_state_dict_without_base_prefix = [
                    key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
                ]
                missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)

            # Some models may have keys that are not in the state by design, removing them before needlessly warning
            # the user.
            if cls._keys_to_ignore_on_load_missing is not None:
                for pat in cls._keys_to_ignore_on_load_missing:
                    missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

            if cls._keys_to_ignore_on_load_unexpected is not None:
                for pat in cls._keys_to_ignore_on_load_unexpected:
                    unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

            if len(unexpected_keys) > 0:
                logger.warning(
                    f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
                    f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
                    f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
                    f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
                    f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
                    f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
                )
            else:
                logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
            if len(missing_keys) > 0:
                logger.warning(
                    f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                    f"and are newly initialized: {missing_keys}\n"
                    f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
                )
            else:
                logger.info(
                    f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
                    f"If your task is similar to the task the model of the checkpoint was trained on, "
                    f"you can already use {model.__class__.__name__} for predictions without further training."
                )
            if len(error_msgs) > 0:
                raise RuntimeError(
                    "Error(s) in loading state_dict for {}:\n\t{}".format(
                        model.__class__.__name__, "\n\t".join(error_msgs)
                    )
                )
        # make sure token embedding weights are still tied if needed
        model.tie_weights()

        # Set model in evaluation mode to deactivate DropOut modules by default
        model.eval()

        if output_loading_info:
            loading_info = {
                "missing_keys": missing_keys,
                "unexpected_keys": unexpected_keys,
                "error_msgs": error_msgs,
            }
            return model, loading_info

        if hasattr(config, "xla_device") and config.xla_device and is_torch_tpu_available():
            import torch_xla.core.xla_model as xm

            model = xm.send_cpu_data_to_device(model, xm.xla_device())
            model.to(xm.xla_device())

        return model
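

# Illustrative sketch (not part of the library API): the legacy key conversion performed in
# `from_pretrained` above renames "gamma" -> "weight" and "beta" -> "bias" in old PyTorch
# state dicts. The helper name `_rename_legacy_keys_example` and the toy keys are hypothetical.
def _rename_legacy_keys_example():
    state_dict = {
        "encoder.LayerNorm.gamma": torch.ones(4),
        "encoder.LayerNorm.beta": torch.zeros(4),
        "encoder.dense.weight": torch.ones(4, 4),
    }
    renamed = {key.replace("gamma", "weight").replace("beta", "bias"): value for key, value in state_dict.items()}
    assert "encoder.LayerNorm.weight" in renamed and "encoder.LayerNorm.bias" in renamed
    return renamed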


class Conv1D(nn.Module):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (:obj:`int`): The number of output features.
        nx (:obj:`int`): The number of input features.
    """

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = nn.Parameter(w)
        self.bias = nn.Parameter(torch.zeros(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(*size_out)
        return x
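

# Illustrative sketch (not part of the library API): Conv1D behaves like nn.Linear with the
# weight matrix stored transposed, as noted in the docstring above. The helper name
# `_conv1d_vs_linear_example` and the sizes used here are hypothetical.
def _conv1d_vs_linear_example():
    x = torch.randn(2, 5, 16)                  # (batch, seq_len, nx)
    conv = Conv1D(nf=32, nx=16)                # stores its weight as (nx, nf)
    linear = nn.Linear(16, 32)                 # stores its weight as (nf, nx)
    with torch.no_grad():
        linear.weight.copy_(conv.weight.t())   # transpose to move between the two conventions
        linear.bias.copy_(conv.bias)
    assert torch.allclose(conv(x), linear(x), atol=1e-5)
    return conv(x)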


class PoolerStartLogits(nn.Module):
    """
    Compute SQuAD start logits from sequence hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, 1)

    def forward(
        self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
                token should be masked.

        Returns:
            :obj:`torch.FloatTensor`: The start logits for SQuAD.
        """
        x = self.dense(hidden_states).squeeze(-1)

        if p_mask is not None:
            if next(self.parameters()).dtype == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask

        return x
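

# Illustrative sketch (not part of the library API): how `p_mask` above pushes the logits of
# invalid positions to a very large negative value. The SimpleNamespace stand-in for the config
# and the helper name `_pooler_start_logits_example` are hypothetical.
def _pooler_start_logits_example():
    from types import SimpleNamespace

    config = SimpleNamespace(hidden_size=8)
    pooler = PoolerStartLogits(config)
    hidden_states = torch.randn(2, 5, 8)             # (batch, seq_len, hidden_size)
    p_mask = torch.zeros(2, 5)
    p_mask[:, 0] = 1.0                                # mask the first token (e.g. a special symbol)
    logits = pooler(hidden_states, p_mask=p_mask)     # shape (batch, seq_len)
    assert bool((logits[:, 0] < -1e29).all())         # masked positions are ~-1e30 in fp32
    return logits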


class PoolerEndLogits(nn.Module):
    """
    Compute SQuAD end logits from sequence hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
            :obj:`layer_norm_eps` to use.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dense_1 = nn.Linear(config.hidden_size, 1)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_states: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        p_mask: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
                The hidden states of the first tokens for the labeled span.
            start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                The position of the first token for the labeled span.
            p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
                token should be masked.

        .. note::

            One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
            ``start_positions`` overrides ``start_states``.

        Returns:
            :obj:`torch.FloatTensor`: The end logits for SQuAD.
        """
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
        if start_positions is not None:
            slen, hsz = hidden_states.shape[-2:]
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions)  # shape (bsz, 1, hsz)
            start_states = start_states.expand(-1, slen, -1)  # shape (bsz, slen, hsz)

        x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
        x = self.activation(x)
        x = self.LayerNorm(x)
        x = self.dense_1(x).squeeze(-1)

        if p_mask is not None:
            if next(self.parameters()).dtype == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask

        return x
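

# Illustrative sketch (not part of the library API): PoolerEndLogits needs either `start_states`
# or `start_positions`; with `start_positions` it gathers the start hidden states itself. The
# SimpleNamespace config stand-in and the helper name are hypothetical.
def _pooler_end_logits_example():
    from types import SimpleNamespace

    config = SimpleNamespace(hidden_size=8, layer_norm_eps=1e-12)
    pooler = PoolerEndLogits(config)
    hidden_states = torch.randn(2, 5, 8)
    start_positions = torch.tensor([1, 3])            # ground-truth start token of each example
    end_logits = pooler(hidden_states, start_positions=start_positions)
    assert end_logits.shape == (2, 5)                 # one end logit per sequence position
    return end_logits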


class PoolerAnswerClass(nn.Module):
    """
    Compute SQuAD 2.0 answer class from classification and start tokens hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
    """

    def __init__(self, config):
        super().__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_states: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        cls_index: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
                The hidden states of the first tokens for the labeled span.
            start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                The position of the first token for the labeled span.
            cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.

        .. note::

            One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
            ``start_positions`` overrides ``start_states``.

        Returns:
            :obj:`torch.FloatTensor`: The SQuAD 2.0 answer class.
        """
        # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample.
        hsz = hidden_states.shape[-1]
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
        if start_positions is not None:
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions).squeeze(-2)  # shape (bsz, hsz)

        if cls_index is not None:
            cls_index = cls_index[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, hsz)
        else:
            cls_token_state = hidden_states[:, -1, :]  # shape (bsz, hsz)

        x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
        x = self.activation(x)
        x = self.dense_1(x).squeeze(-1)

        return x
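

# Illustrative sketch (not part of the library API): when `cls_index` is omitted,
# PoolerAnswerClass falls back to the last token of each sequence as the CLS state. The
# SimpleNamespace config stand-in and the helper name are hypothetical.
def _pooler_answer_class_example():
    from types import SimpleNamespace

    config = SimpleNamespace(hidden_size=8)
    pooler = PoolerAnswerClass(config)
    hidden_states = torch.randn(2, 5, 8)
    cls_logits = pooler(hidden_states, start_positions=torch.tensor([1, 3]))
    assert cls_logits.shape == (2,)                   # one answerability logit per example
    return cls_logits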


@dataclass
class SquadHeadOutput(ModelOutput):
    """
    Base class for outputs of question answering models using a :class:`~transformers.modeling_utils.SQuADHead`.

    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification
            losses.
        start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top config.start_n_top start token possibilities (beam-search).
        start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top config.start_n_top start token possibilities (beam-search).
        end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities
            (beam-search).
        end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the ``is_impossible`` label of the answers.

    """

    loss: Optional[torch.FloatTensor] = None
    start_top_log_probs: Optional[torch.FloatTensor] = None
    start_top_index: Optional[torch.LongTensor] = None
    end_top_log_probs: Optional[torch.FloatTensor] = None
    end_top_index: Optional[torch.LongTensor] = None
    cls_logits: Optional[torch.FloatTensor] = None


class SQuADHead(nn.Module):
    r"""
    A SQuAD head inspired by XLNet.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
            :obj:`layer_norm_eps` to use.
    """

    def __init__(self, config):
        super().__init__()
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top

        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)

    @replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig)
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        cls_index: Optional[torch.LongTensor] = None,
        is_impossible: Optional[torch.LongTensor] = None,
        p_mask: Optional[torch.FloatTensor] = None,
        return_dict: bool = False,
    ) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                Final hidden states of the model on the sequence tokens.
            start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Positions of the first token for the labeled span.
            end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Positions of the last token for the labeled span.
            cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.
            is_impossible (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Whether the question has a possible answer in the paragraph or not.
            p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
                token should be masked.
            return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.

        Returns:
        """
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)

            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)

                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5

            return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,)

        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            start_log_probs = F.softmax(start_logits, dim=-1)  # shape (bsz, slen)

            start_top_log_probs, start_top_index = torch.topk(
                start_log_probs, self.start_n_top, dim=-1
            )  # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz)  # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp)  # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1)  # shape (bsz, slen, start_n_top, hsz)

            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
                start_states
            )  # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = F.softmax(end_logits, dim=1)  # shape (bsz, slen, start_n_top)

            end_top_log_probs, end_top_index = torch.topk(
                end_log_probs, self.end_n_top, dim=1
            )  # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)

            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)

            if not return_dict:
                return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
            else:
                return SquadHeadOutput(
                    start_top_log_probs=start_top_log_probs,
                    start_top_index=start_top_index,
                    end_top_log_probs=end_top_log_probs,
                    end_top_index=end_top_index,
                    cls_logits=cls_logits,
                )
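

# Illustrative sketch (not part of the library API): the two branches of SQuADHead.forward above.
# With labels it returns a span loss; without labels it returns beam-search style top-k start/end
# candidates. The SimpleNamespace config stand-in and the helper name are hypothetical.
def _squad_head_example():
    from types import SimpleNamespace

    config = SimpleNamespace(hidden_size=8, layer_norm_eps=1e-12, start_n_top=2, end_n_top=2)
    head = SQuADHead(config)
    hidden_states = torch.randn(2, 5, 8)
    # Training-style call: start/end positions are provided, so a single loss tensor comes back.
    loss = head(hidden_states, start_positions=torch.tensor([1, 2]), end_positions=torch.tensor([3, 4]))[0]
    # Inference-style call: no labels, so top-k span candidates and the answer-class logits come back.
    outputs = head(hidden_states, return_dict=True)
    assert outputs.start_top_index.shape == (2, 2)    # (batch_size, start_n_top)
    return loss, outputs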


class SequenceSummary(nn.Module):
    r"""
    Compute a single vector summary of a sequence hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
            config class of your model for the default values it uses):

            - **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:

                - :obj:`"last"` -- Take the last token hidden state (like XLNet)
                - :obj:`"first"` -- Take the first token hidden state (like Bert)
                - :obj:`"mean"` -- Take the mean of all tokens hidden states
                - :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
                - :obj:`"attn"` -- Not implemented now, use multi-head attention

            - **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
            - **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
              :obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
            - **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
              output, another string or :obj:`None` will add no activation.
            - **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
              activation.
            - **summary_last_dropout** (:obj:`float`)-- Optional dropout probability after the projection and
              activation.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()

        self.summary_type = getattr(config, "summary_type", "last")
        if self.summary_type == "attn":
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

        self.summary = Identity()
        if hasattr(config, "summary_use_proj") and config.summary_use_proj:
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = nn.Linear(config.hidden_size, num_classes)

        activation_string = getattr(config, "summary_activation", None)
        self.activation: Callable = get_activation(activation_string) if activation_string else Identity()

        self.first_dropout = Identity()
        if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
            self.first_dropout = nn.Dropout(config.summary_first_dropout)

        self.last_dropout = Identity()
        if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
            self.last_dropout = nn.Dropout(config.summary_last_dropout)

    def forward(
        self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
    ) -> torch.FloatTensor:
        """
        Compute a single vector summary of a sequence hidden states.

        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`[batch_size, seq_len, hidden_size]`):
                The hidden states of the last layer.
            cls_index (:obj:`torch.LongTensor` of shape :obj:`[batch_size]` or :obj:`[batch_size, ...]` where ... are optional leading dimensions of :obj:`hidden_states`, `optional`):
                Used if :obj:`summary_type == "cls_index"` and takes the last token of the sequence as classification
                token.

        Returns:
            :obj:`torch.FloatTensor`: The summary of the sequence hidden states.
        """
        if self.summary_type == "last":
            output = hidden_states[:, -1]
        elif self.summary_type == "first":
            output = hidden_states[:, 0]
        elif self.summary_type == "mean":
            output = hidden_states.mean(dim=1)
        elif self.summary_type == "cls_index":
            if cls_index is None:
                cls_index = torch.full_like(
                    hidden_states[..., :1, :],
                    hidden_states.shape[-2] - 1,
                    dtype=torch.long,
                )
            else:
                cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
                cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
            # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
            output = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, XX, hidden_size)
        elif self.summary_type == "attn":
            raise NotImplementedError

        output = self.first_dropout(output)
        output = self.summary(output)
        output = self.activation(output)
        output = self.last_dropout(output)

        return output
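

# Illustrative sketch (not part of the library API): with a minimal config, SequenceSummary
# reduces to the chosen pooling operation, since projection, activation and dropout all fall
# back to Identity. The SimpleNamespace config stand-in and the helper name are hypothetical.
def _sequence_summary_example():
    from types import SimpleNamespace

    config = SimpleNamespace(summary_type="mean")
    summary = SequenceSummary(config)
    hidden_states = torch.randn(2, 5, 8)
    pooled = summary(hidden_states)                   # mean over the sequence dimension
    assert torch.allclose(pooled, hidden_states.mean(dim=1))
    return pooled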


def prune_linear_layer(layer: torch.nn.Linear, index: torch.LongTensor, dim: int = 0) -> torch.nn.Linear:
    """
    Prune a linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (:obj:`torch.nn.Linear`): The layer to prune.
        index (:obj:`torch.LongTensor`): The indices to keep in the layer.
        dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.

    Returns:
        :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if layer.bias is not None:
        if dim == 1:
            b = layer.bias.clone().detach()
        else:
            b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    if layer.bias is not None:
        new_layer.bias.requires_grad = False
        new_layer.bias.copy_(b.contiguous())
        new_layer.bias.requires_grad = True
    return new_layer
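

# Illustrative sketch (not part of the library API): pruning a linear layer along its output
# dimension keeps only the selected rows of the weight matrix. The helper name
# `_prune_linear_layer_example` and the toy sizes are hypothetical.
def _prune_linear_layer_example():
    layer = nn.Linear(4, 6)                           # weight of shape (6, 4)
    index = torch.tensor([0, 2, 3], dtype=torch.long)
    pruned = prune_linear_layer(layer, index, dim=0)  # keep output features 0, 2 and 3
    assert pruned.weight.shape == (3, 4)
    assert torch.equal(pruned.weight, layer.weight[index])
    return pruned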


def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
    """
    Prune a Conv1D layer to keep only entries in index. A Conv1D layer works like a Linear layer (see e.g. BERT) but
    the weights are transposed.

    Used to remove heads.

    Args:
        layer (:class:`~transformers.modeling_utils.Conv1D`): The layer to prune.
        index (:obj:`torch.LongTensor`): The indices to keep in the layer.
        dim (:obj:`int`, `optional`, defaults to 1): The dimension on which to keep the indices.

    Returns:
        :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with :obj:`requires_grad=True`.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if dim == 0:
        b = layer.bias.clone().detach()
    else:
        b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    new_layer.bias.requires_grad = False
    new_layer.bias.copy_(b.contiguous())
    new_layer.bias.requires_grad = True
    return new_layer


def prune_layer(
    layer: Union[torch.nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
) -> Union[torch.nn.Linear, Conv1D]:
    """
    Prune a Conv1D or linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (:obj:`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
        index (:obj:`torch.LongTensor`): The indices to keep in the layer.
        dim (:obj:`int`, `optional`): The dimension on which to keep the indices.

    Returns:
        :obj:`torch.nn.Linear` or :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with
        :obj:`requires_grad=True`.
    """
    if isinstance(layer, nn.Linear):
        return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
    elif isinstance(layer, Conv1D):
        return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
    else:
        raise ValueError("Can't prune layer of class {}".format(layer.__class__))
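

# Illustrative sketch (not part of the library API): prune_layer dispatches on the layer type and
# uses the appropriate default dimension, since Conv1D stores its weights transposed with respect
# to nn.Linear. The helper name `_prune_layer_example` and the toy sizes are hypothetical.
def _prune_layer_example():
    index = torch.tensor([1, 2], dtype=torch.long)
    pruned_linear = prune_layer(nn.Linear(4, 6), index)   # default dim=0: keep output features
    assert pruned_linear.weight.shape == (2, 4)
    pruned_conv = prune_layer(Conv1D(nf=6, nx=4), index)  # default dim=1: weights are transposed
    assert pruned_conv.weight.shape == (4, 2)
    return pruned_linear, pruned_conv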


def apply_chunking_to_forward(
    forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
    """
    This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the
    dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory.

    If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as
    directly applying :obj:`forward_fn` to :obj:`input_tensors`.

    Args:
        forward_fn (:obj:`Callable[..., torch.Tensor]`):
            The forward function of the model.
        chunk_size (:obj:`int`):
            The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`.
        chunk_dim (:obj:`int`):
            The dimension over which the :obj:`input_tensors` should be chunked.
        input_tensors (:obj:`Tuple[torch.Tensor]`):
            The input tensors of ``forward_fn`` which will be chunked.

    Returns:
        :obj:`torch.Tensor`: A tensor with the same shape as the one :obj:`forward_fn` would have given if applied directly to :obj:`input_tensors`.


    Examples::

        # rename the usual forward() fn to forward_chunk()
        def forward_chunk(self, hidden_states):
            hidden_states = self.decoder(hidden_states)
            return hidden_states

        # implement a chunked forward function
        def forward(self, hidden_states):
            return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
    """

    assert len(input_tensors) > 0, "{} has to be a tuple/list of tensors".format(input_tensors)
    tensor_shape = input_tensors[0].shape[chunk_dim]
    assert all(
        input_tensor.shape[chunk_dim] == tensor_shape for input_tensor in input_tensors
    ), "All input tenors have to be of the same shape"

    # inspect.signature exists since Python 3.5 and is a pure Python function -> no problem with backward compatibility
    num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
    assert num_args_in_forward_chunk_fn == len(
        input_tensors
    ), "forward_chunk_fn expects {} arguments, but only {} input tensors are given".format(
        num_args_in_forward_chunk_fn, len(input_tensors)
    )

    if chunk_size > 0:
        assert (
            input_tensors[0].shape[chunk_dim] % chunk_size == 0
        ), "The dimension to be chunked {} has to be a multiple of the chunk size {}".format(
            input_tensors[0].shape[chunk_dim], chunk_size
        )

        num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size

        # chunk input tensor into tuples
        input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
        # apply forward fn to every tuple
        output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
        # concatenate output at same dimension
        return torch.cat(output_chunks, dim=chunk_dim)

    return forward_fn(*input_tensors)
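

# Illustrative sketch (not part of the library API): when the forward function is independent
# across the chunked dimension (as a position-wise feed-forward layer is), the chunked and the
# direct application give the same result. The helper name `_apply_chunking_example` is hypothetical.
def _apply_chunking_example():
    dense = nn.Linear(8, 8)
    hidden_states = torch.randn(2, 6, 8)               # chunk the sequence dimension (length 6)
    chunked = apply_chunking_to_forward(dense.forward, 3, 1, hidden_states)  # two chunks of size 3
    assert torch.allclose(chunked, dense(hidden_states), atol=1e-5)
    return chunked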