# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import inspect
import os
import re
import warnings
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union

import torch
from torch import Tensor, device, dtype, nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F

from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .file_utils import (
    DUMMY_INPUTS,
    FLAX_WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    WEIGHTS_NAME,
    ModelOutput,
    cached_path,
    hf_bucket_url,
    is_offline_mode,
    is_remote_url,
    replace_return_docstrings,
)
from .generation_utils import GenerationMixin
from .integrations import is_deepspeed_zero3_enabled
from .utils import logging


logger = logging.get_logger(__name__)


try:
    from torch.nn import Identity
except ImportError:
    # Older PyTorch compatibility
    class Identity(nn.Module):
        r"""A placeholder identity operator that is argument-insensitive."""

        def __init__(self, *args, **kwargs):
            super().__init__()

        def forward(self, input):
            return input


def find_pruneable_heads_and_indices(
    heads: List[int], n_heads: int, head_size: int, already_pruned_heads: Set[int]
) -> Tuple[Set[int], torch.LongTensor]:
    """
    Finds the heads and their indices taking :obj:`already_pruned_heads` into account.

    Args:
        heads (:obj:`List[int]`): List of the indices of heads to prune.
        n_heads (:obj:`int`): The number of heads in the model.
        head_size (:obj:`int`): The size of each head.
        already_pruned_heads (:obj:`Set[int]`): A set of already pruned heads.

    Returns:
        :obj:`Tuple[Set[int], torch.LongTensor]`: A tuple with the remaining heads and their corresponding indices.
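
    Example (an illustrative sketch; the values assume a 12-head attention layer with head size 64)::

        >>> heads, index = find_pruneable_heads_and_indices(
        ...     heads=[0, 2], n_heads=12, head_size=64, already_pruned_heads={1}
        ... )
        >>> heads  # head 1 was already pruned, so heads 0 and 2 remain to be pruned
        {0, 2}
        >>> index.shape  # flattened indices of the 10 remaining heads: 10 * 64
        torch.Size([640])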
    """
    mask = torch.ones(n_heads, head_size)
    heads = set(heads) - already_pruned_heads  # Convert to set and remove already pruned heads
    for head in heads:
        # Compute how many pruned heads are before the head and move the index accordingly
        head = head - sum(1 if h < head else 0 for h in already_pruned_heads)
        mask[head] = 0
    mask = mask.view(-1).contiguous().eq(1)
    index: torch.LongTensor = torch.arange(len(mask))[mask].long()
    return heads, index


def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
    try:
        return next(parameter.parameters()).device
    except StopIteration:
        # For nn.DataParallel compatibility in PyTorch 1.5

        def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
            tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
            return tuples

        gen = parameter._named_members(get_members_fn=find_tensor_attributes)
        first_tuple = next(gen)
        return first_tuple[1].device


def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
    try:
        return next(parameter.parameters()).dtype
    except StopIteration:
        # For nn.DataParallel compatibility in PyTorch 1.5

        def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
            tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
            return tuples

        gen = parameter._named_members(get_members_fn=find_tensor_attributes)
        first_tuple = next(gen)
        return first_tuple[1].dtype


class ModuleUtilsMixin:
    """
    A few utilities for :obj:`torch.nn.Modules`, to be used as a mixin.
    """

    @staticmethod
    def _hook_rss_memory_pre_forward(module, *args, **kwargs):
        try:
            import psutil
        except ImportError:
            raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")

        process = psutil.Process(os.getpid())
        mem = process.memory_info()
        module.mem_rss_pre_forward = mem.rss
        return None

    @staticmethod
    def _hook_rss_memory_post_forward(module, *args, **kwargs):
        try:
            import psutil
        except ImportError:
            raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")

        process = psutil.Process(os.getpid())
        mem = process.memory_info()
        module.mem_rss_post_forward = mem.rss
        mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
        module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
        return None

    def add_memory_hooks(self):
        """
        Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.

        Increase in memory consumption is stored in a :obj:`mem_rss_diff` attribute for each module and can be reset to
        zero with :obj:`model.reset_memory_hooks_state()`.
        """
        for module in self.modules():
            module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
            module.register_forward_hook(self._hook_rss_memory_post_forward)
        self.reset_memory_hooks_state()

    def reset_memory_hooks_state(self):
        """
        Reset the :obj:`mem_rss_diff` attribute of each module (see
        :func:`~transformers.modeling_utils.ModuleUtilsMixin.add_memory_hooks`).
        """
        for module in self.modules():
            module.mem_rss_diff = 0
            module.mem_rss_post_forward = 0
            module.mem_rss_pre_forward = 0

    @property
    def device(self) -> device:
        """
        :obj:`torch.device`: The device on which the module is (assuming that all the module parameters are on the same
        device).
        """
        return get_parameter_device(self)

    @property
    def dtype(self) -> dtype:
        """
        :obj:`torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
        """
        return get_parameter_dtype(self)

    def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
        """
        Invert an attention mask (e.g., switches 0. and 1.).

        Args:
            encoder_attention_mask (:obj:`torch.Tensor`): An attention mask.

        Returns:
            :obj:`torch.Tensor`: The inverted attention mask.
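
        Example (an illustrative sketch; assumes a :obj:`float32` model instance ``model``)::

            >>> mask = torch.tensor([[1, 1, 0]])
            >>> model.invert_attention_mask(mask)  # doctest: +SKIP
            tensor([[[[-0.0000e+00, -0.0000e+00, -1.0000e+09]]]])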
        """
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
        # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
        # /transformer/transformer_layers.py#L270
        # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
        # encoder_extended_attention_mask.transpose(-1, -2))
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility

        if self.dtype == torch.float16:
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
        elif self.dtype == torch.float32:
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
        else:
            raise ValueError(
                f"{self.dtype} not recognized. `dtype` should be set to either `torch.float32` or `torch.float16`"
            )

        return encoder_extended_attention_mask

    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (:obj:`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (:obj:`Tuple[int]`):
                The shape of the input to the model.
            device: (:obj:`torch.device`):
                The device of the input to the model.

        Returns:
            :obj:`torch.Tensor` The extended attention mask, with the same dtype as :obj:`attention_mask.dtype`.
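
        Example (an illustrative sketch; assumes an encoder-only :obj:`float32` model instance ``model``)::

            >>> mask = torch.tensor([[1, 1, 0]])
            >>> model.get_extended_attention_mask(mask, mask.shape, mask.device)  # doctest: +SKIP
            tensor([[[[-0., -0., -10000.]]]])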
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if self.config.is_decoder:
                batch_size, seq_length = input_shape
                seq_ids = torch.arange(seq_length, device=device)
                causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
                # in case past_key_values are used we need to add a prefix ones mask to the causal mask
                # causal and attention masks must have same type with pytorch version < 1.3
                causal_mask = causal_mask.to(attention_mask.dtype)

                if causal_mask.shape[1] < attention_mask.shape[1]:
                    prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
                    causal_mask = torch.cat(
                        [
                            torch.ones(
                                (batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype
                            ),
                            causal_mask,
                        ],
                        axis=-1,
                    )

                extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

    def get_head_mask(
        self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
    ) -> Tensor:
        """
        Prepare the head mask if needed.

        Args:
            head_mask (:obj:`torch.Tensor` with shape :obj:`[num_heads]` or :obj:`[num_hidden_layers x num_heads]`, `optional`):
                The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
            num_hidden_layers (:obj:`int`):
                The number of hidden layers in the model.
            is_attention_chunked (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not the attention scores are computed by chunks.

        Returns:
            :obj:`torch.Tensor` with shape :obj:`[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or
            list with :obj:`[None]` for each layer.
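
        Example (an illustrative sketch; assumes a model instance ``model`` with 12 hidden layers)::

            >>> head_mask = torch.ones(12)  # one entry per attention head, shared across layers
            >>> head_mask = model.get_head_mask(head_mask, num_hidden_layers=12)  # doctest: +SKIP
            >>> head_mask[0].shape  # broadcastable over batch, sequence and attention dimensions
            torch.Size([1, 12, 1, 1])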
        """
        if head_mask is not None:
            head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
            if is_attention_chunked is True:
                head_mask = head_mask.unsqueeze(-1)
        else:
            head_mask = [None] * num_hidden_layers

        return head_mask

    def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
        """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
        if head_mask.dim() == 1:
            head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
        elif head_mask.dim() == 2:
            head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
        assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
        head_mask = head_mask.to(dtype=self.dtype)  # switch to float if need + fp16 compatibility
        return head_mask

    def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
        """
        Get number of (optionally, trainable or non-embeddings) parameters in the module.

        Args:
            only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return only the number of trainable parameters.

            exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return only the number of non-embeddings parameters.

        Returns:
            :obj:`int`: The number of parameters.
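
        Example (an illustrative sketch; the exact count depends on the checkpoint)::

            >>> from transformers import BertModel
            >>> model = BertModel.from_pretrained('bert-base-uncased')  # doctest: +SKIP
            >>> model.num_parameters()  # doctest: +SKIP
            109482240
            >>> model.num_parameters(only_trainable=True, exclude_embeddings=True)  # doctest: +SKIP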
        """

        def parameter_filter(x):
            return (x.requires_grad or not only_trainable) and not (
                isinstance(x, torch.nn.Embedding) and exclude_embeddings
            )

        params = filter(parameter_filter, self.parameters()) if only_trainable else self.parameters()
        return sum(p.numel() for p in params)

    def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int:
        """
        Helper function to estimate the total number of tokens from the model inputs.

        Args:
            input_dict (:obj:`dict`): The model inputs.

        Returns:
            :obj:`int`: The total number of tokens.
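
        Example (an illustrative sketch; ``model`` is any instantiated model using this mixin)::

            >>> input_dict = {"input_ids": torch.ones((4, 32), dtype=torch.long)}
            >>> model.estimate_tokens(input_dict)  # doctest: +SKIP
            128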
        """
        token_inputs = [tensor for key, tensor in input_dict.items() if "input" in key]
        if token_inputs:
            return sum([token_input.numel() for token_input in token_inputs])
        else:
            warnings.warn(
                "Could not estimate the number of tokens of the input, floating-point operations will not be computed"
            )
            return 0

    def floating_point_ops(
        self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True
    ) -> int:
        """
        Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a
        batch with this transformer model. Default approximation neglects the quadratic dependency on the number of
        tokens (valid if :obj:`12 * d_model << sequence_length`) as laid out in `this paper
        <https://arxiv.org/pdf/2001.08361.pdf>`__ section 2.1. Should be overridden for transformers with parameter
        re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.

        Args:
            input_dict (:obj:`Dict[str, Union[torch.Tensor, Any]]`):
                The model inputs, from which the number of tokens in the batch is estimated.


            exclude_embeddings (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to count embedding and softmax operations.

        Returns:
            :obj:`int`: The number of floating-point operations.
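
        Example (an illustrative sketch of the default ``6 * num_tokens * num_parameters`` estimate; assumes a model
        instance ``model``)::

            >>> input_dict = {"input_ids": torch.ones((2, 128), dtype=torch.long)}
            >>> flos = model.floating_point_ops(input_dict)  # doctest: +SKIP
            >>> flos == 6 * 2 * 128 * model.num_parameters(exclude_embeddings=True)  # doctest: +SKIP
            True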
        """

        return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)


class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin):
    r"""
    Base class for all models.

    :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods
    for loading, downloading and saving models as well as a few methods common to all models to:

        * resize the input embeddings,
        * prune heads in the self-attention heads.

    Class attributes (overridden by derived classes):

        - **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
          :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
        - **load_tf_weights** (:obj:`Callable`) -- A python `method` for loading a TensorFlow checkpoint in a PyTorch
          model, taking as arguments:

            - **model** (:class:`~transformers.PreTrainedModel`) -- An instance of the model on which to load the
              TensorFlow checkpoint.
            - **config** (:class:`~transformers.PreTrainedConfig`) -- An instance of the configuration associated to
              the model.
            - **path** (:obj:`str`) -- A path to the TensorFlow checkpoint.

        - **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
          derived classes of the same architecture adding modules on top of the base model.
        - **is_parallelizable** (:obj:`bool`) -- A flag indicating whether this model supports model parallelization.
    """
    config_class = None
    base_model_prefix = ""
    # a list of re patterns of tensor names to ignore from the model when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_missing = None
    # a list of re patterns of tensor names to ignore from the weights when loading the model weights
    # (and avoid unnecessary warnings).
    _keys_to_ignore_on_load_unexpected = None
    # a list of tensor names to ignore when saving the model (useful for keys that aren't
    # trained, but which are deterministic)
    _keys_to_ignore_on_save = None

    is_parallelizable = False

    @property
    def dummy_inputs(self) -> Dict[str, torch.Tensor]:
        """
        :obj:`Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
        """
        return {"input_ids": torch.tensor(DUMMY_INPUTS)}

    def __init__(self, config: PretrainedConfig, *inputs, **kwargs):
        super().__init__()
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
                "`PretrainedConfig`. To create a model from a pretrained model use "
                f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # Save config and origin of the pretrained weights if given in model
        self.config = config
        self.name_or_path = config.name_or_path

    @property
    def base_model(self) -> nn.Module:
        """
        :obj:`torch.nn.Module`: The main body of the model.
        """
        return getattr(self, self.base_model_prefix, self)

    def get_input_embeddings(self) -> nn.Module:
        """
        Returns the model's input embeddings.

        Returns:
            :obj:`nn.Module`: A torch module mapping vocabulary to hidden states.
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            return base_model.get_input_embeddings()
        else:
            raise NotImplementedError

    def set_input_embeddings(self, value: nn.Module):
        """
        Set model's input embeddings.

        Args:
            value (:obj:`nn.Module`): A module mapping vocabulary to hidden states.
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            base_model.set_input_embeddings(value)
        else:
            raise NotImplementedError

    def get_output_embeddings(self) -> nn.Module:
        """
        Returns the model's output embeddings.

        Returns:
            :obj:`nn.Module`: A torch module mapping hidden states to vocabulary.
        """
        return None  # Overwrite for models with output embeddings

    def tie_weights(self):
        """
        Tie the weights between the input embeddings and the output embeddings.

        If the :obj:`torchscript` flag is set in the configuration, TorchScript can't handle parameter sharing, so we
        clone the weights instead.
        """
        output_embeddings = self.get_output_embeddings()
        if output_embeddings is not None and self.config.tie_word_embeddings:
            self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())

        if self.config.is_encoder_decoder and self.config.tie_encoder_decoder:
            if hasattr(self, self.base_model_prefix):
                self = getattr(self, self.base_model_prefix)
            self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)

    @staticmethod
    def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str):
        uninitialized_encoder_weights: List[str] = []
        if decoder.__class__ != encoder.__class__:
            logger.info(
                f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder "
                "weights are correctly initialized."
            )

        def tie_encoder_to_decoder_recursively(
            decoder_pointer: nn.Module,
            encoder_pointer: nn.Module,
            module_name: str,
            uninitialized_encoder_weights: List[str],
            depth=0,
        ):
            assert isinstance(decoder_pointer, nn.Module) and isinstance(
                encoder_pointer, nn.Module
            ), f"{decoder_pointer} and {encoder_pointer} have to be of type torch.nn.Module"
            if hasattr(decoder_pointer, "weight"):
                assert hasattr(encoder_pointer, "weight")
                encoder_pointer.weight = decoder_pointer.weight
                if hasattr(decoder_pointer, "bias"):
                    assert hasattr(encoder_pointer, "bias")
                    encoder_pointer.bias = decoder_pointer.bias
                return

            encoder_modules = encoder_pointer._modules
            decoder_modules = decoder_pointer._modules
            if len(decoder_modules) > 0:
                assert (
                    len(encoder_modules) > 0
                ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"

                all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
                encoder_layer_pos = 0
                for name, module in decoder_modules.items():
                    if name.isdigit():
                        encoder_name = str(int(name) + encoder_layer_pos)
                        decoder_name = name
                        if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
                            encoder_modules
                        ) != len(decoder_modules):
                            # this can happen if the name corresponds to the position in a list of layers;
                            # in this case the decoder has added a cross-attention layer that the encoder does not
                            # have, so skip this step and subtract one layer position from the encoder
                            encoder_layer_pos -= 1
                            continue
                    elif name not in encoder_modules:
                        continue
                    elif depth > 500:
                        raise ValueError(
                            "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
                        )
                    else:
                        decoder_name = encoder_name = name
                    tie_encoder_to_decoder_recursively(
                        decoder_modules[decoder_name],
                        encoder_modules[encoder_name],
                        module_name + "/" + name,
                        uninitialized_encoder_weights,
                        depth=depth + 1,
                    )
                    all_encoder_weights.remove(module_name + "/" + encoder_name)

                uninitialized_encoder_weights += list(all_encoder_weights)

        # tie weights recursively
        tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights)
        if len(uninitialized_encoder_weights) > 0:
            logger.warning(
                f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
            )

    def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
        """Tie or clone module weights depending on whether we are using TorchScript or not"""
        if self.config.torchscript:
            output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
        else:
            output_embeddings.weight = input_embeddings.weight

        if getattr(output_embeddings, "bias", None) is not None:
            output_embeddings.bias.data = torch.nn.functional.pad(
                output_embeddings.bias.data,
                (
                    0,
                    output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],
                ),
                "constant",
                0,
            )
        if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
            output_embeddings.out_features = input_embeddings.num_embeddings

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> torch.nn.Embedding:
        """
        Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.

        Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.

        Arguments:
            new_num_tokens (:obj:`int`, `optional`):
                The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
                vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
                just returns a pointer to the input tokens :obj:`torch.nn.Embedding` module of the model without doing
                anything.

        Return:
            :obj:`torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
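
        Example (a minimal sketch; assumes new tokens were added to ``tokenizer``)::

            >>> tokenizer.add_tokens(['new_tok1', 'new_tok2'])  # doctest: +SKIP
            >>> model.resize_token_embeddings(len(tokenizer))  # doctest: +SKIP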
        """
        model_embeds = self._resize_token_embeddings(new_num_tokens)
        if new_num_tokens is None:
            return model_embeds

        # Update base model and current model config
        self.config.vocab_size = new_num_tokens
        self.vocab_size = new_num_tokens

        # Tie weights again if needed
        self.tie_weights()

        return model_embeds

    def _resize_token_embeddings(self, new_num_tokens):
        old_embeddings = self.get_input_embeddings()
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.set_input_embeddings(new_embeddings)

        # if word embeddings are not tied, make sure that lm head is resized as well
        if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings:
            old_lm_head = self.get_output_embeddings()
            new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens)
            self.set_output_embeddings(new_lm_head)

        return self.get_input_embeddings()

    def _get_resized_embeddings(
        self, old_embeddings: torch.nn.Embedding, new_num_tokens: Optional[int] = None
    ) -> torch.nn.Embedding:
        """
        Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
        initialized vectors at the end. Reducing the size will remove vectors from the end.

        Args:
            old_embeddings (:obj:`torch.nn.Embedding`):
                Old embeddings to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the embedding matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`torch.nn.Embedding` module of the model without doing anything.

        Return:
            :obj:`torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
            :obj:`new_num_tokens` is :obj:`None`
        """
        if new_num_tokens is None:
            return old_embeddings

        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None):
                old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
        else:
            old_num_tokens, old_embedding_dim = old_embeddings.weight.size()

        if old_num_tokens == new_num_tokens:
            return old_embeddings

        if not isinstance(old_embeddings, nn.Embedding):
            raise TypeError(
                f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}."
                f"You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Embedding}."
            )

        # Build new embeddings
        new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim).to(self.device)

        # initialize all new embeddings (in particular added tokens)
        self._init_weights(new_embeddings)

        # Copy token embeddings from the previous weights

        # numbers of tokens to copy
        n = min(old_num_tokens, new_num_tokens)
        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=0):
                if torch.distributed.get_rank() == 0:
                    new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
        else:
            new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]

        return new_embeddings

    def _get_resized_lm_head(
        self, old_lm_head: torch.nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False
    ) -> torch.nn.Linear:
        """
        Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized
        vectors at the end. Reducing the size will remove vectors from the end.

        Args:
            old_lm_head (:obj:`torch.nn.Linear`):
                Old lm head linear layer to be resized.
            new_num_tokens (:obj:`int`, `optional`):
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
                :obj:`torch.nn.Linear` module of the model without doing anything.
            transposed (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether ``old_lm_head`` is transposed or not. If :obj:`True`, ``old_lm_head.size()`` is
                ``lm_head_dim, vocab_size``, else ``vocab_size, lm_head_dim``.

        Return:
            :obj:`torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if
            :obj:`new_num_tokens` is :obj:`None`
        """
        if new_num_tokens is None:
            return old_lm_head

        old_num_tokens, old_lm_head_dim = (
            old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
        )

        if old_num_tokens == new_num_tokens:
            return old_lm_head

        if not isinstance(old_lm_head, nn.Linear):
            raise TypeError(
                f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}."
                f"You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Linear}."
            )

        # Build new lm head
        new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim)
        has_new_lm_head_bias = old_lm_head.bias is not None
        new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias).to(self.device)

        # initialize new lm head (in particular added tokens)
        self._init_weights(new_lm_head)

        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)

        # Copy old lm head weights to new lm head
        if not transposed:
            new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :]
        else:
            new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy]

        # Copy bias weights to new lm head
        if has_new_lm_head_bias:
            new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy]

        return new_lm_head

    def init_weights(self):
        """
        Initializes and prunes weights if needed.
        """
        # Initialize weights
        self.apply(self._init_weights)

        # Prune heads if needed
        if self.config.pruned_heads:
            self.prune_heads(self.config.pruned_heads)

        # Tie weights if needed
        self.tie_weights()

    def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
        """
        Prunes heads of the base model.

        Arguments:
            heads_to_prune (:obj:`Dict[int, List[int]]`):
                Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
                heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
                0 and 2 on layer 1 and heads 2 and 3 on layer 2.
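
        Example (an illustrative sketch)::

            >>> # prune heads 0 and 2 of layer 1 and heads 2 and 3 of layer 2
            >>> model.prune_heads({1: [0, 2], 2: [2, 3]})  # doctest: +SKIP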
        """
        # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
        for layer, heads in heads_to_prune.items():
            union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
            self.config.pruned_heads[layer] = list(union_heads)  # Unfortunately we have to store it as list for JSON

        self.base_model._prune_heads(heads_to_prune)

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        save_config: bool = True,
        state_dict: Optional[dict] = None,
        save_function: Callable = torch.save,
    ):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        :func:`~transformers.PreTrainedModel.from_pretrained` class method.

        Arguments:
            save_directory (:obj:`str` or :obj:`os.PathLike`):
                Directory to which to save. Will be created if it doesn't exist.
            save_config (:obj:`bool`, `optional`, defaults to :obj:`True`):
                Whether or not to save the config of the model. Useful when in distributed training (e.g. on TPUs) and
                this function needs to be called on all processes. In that case, set :obj:`save_config=True` only on
                the main process to avoid race conditions.
            state_dict (nested dictionary of :obj:`torch.Tensor`):
                The state dictionary of the model to save. Will default to :obj:`self.state_dict()`, but can be used to
                only save parts of the model or if special precautions need to be taken when recovering the state
                dictionary of a model (like when using model parallelism).
            save_function (:obj:`Callable`):
                The function to use to save the state dictionary. Useful for distributed training (e.g. on TPUs) when
                one needs to replace :obj:`torch.save` with another method.
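
        Example (an illustrative round trip)::

            >>> from transformers import BertModel
            >>> model = BertModel.from_pretrained('bert-base-uncased')  # doctest: +SKIP
            >>> model.save_pretrained('./my_model_directory/')  # doctest: +SKIP
            >>> model = BertModel.from_pretrained('./my_model_directory/')  # doctest: +SKIP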
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)

        # Only save the model itself if we are using distributed training
        model_to_save = unwrap_model(self)

        # Attach architecture to the config
        model_to_save.config.architectures = [model_to_save.__class__.__name__]

        # Save the config
        if save_config:
            model_to_save.config.save_pretrained(save_directory)

        # Save the model
        if state_dict is None:
            state_dict = model_to_save.state_dict()

        # Handle the case where some state_dict keys shouldn't be saved
        if self._keys_to_ignore_on_save is not None:
            state_dict = {k: v for k, v in state_dict.items() if k not in self._keys_to_ignore_on_save}

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
        save_function(state_dict, output_model_file)

        logger.info(f"Model weights saved in {output_model_file}")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
        r"""
        Instantiate a pretrained pytorch model from a pre-trained model configuration.

        The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated). To
        train the model, you should first set it back in training mode with ``model.train()``.

        The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.

        The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        Parameters:
            pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`, `optional`):
                Can be either:

                    - A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
                      a user or organization name, like ``dbmdz/bert-base-german-cased``.
                    - A path to a `directory` containing model weights saved using
                      :func:`~transformers.PreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
                    - A path or url to a `tensorflow index checkpoint file` (e.g, ``./tf_model/model.ckpt.index``). In
                      this case, ``from_tf`` should be set to :obj:`True` and a configuration object should be provided
                      as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in
                      a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
                    - A path or url to a model folder containing a `flax checkpoint file` in `.msgpack` format (e.g,
                      ``./flax_model/`` containing ``flax_model.msgpack``). In this case, ``from_flax`` should be set
                      to :obj:`True`.
                    - :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
                      arguments ``config`` and ``state_dict``).
            model_args (sequence of positional arguments, `optional`):
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
            config (:obj:`Union[PretrainedConfig, str, os.PathLike]`, `optional`):
                Can be either:

                    - an instance of a class derived from :class:`~transformers.PretrainedConfig`,
                    - a string or path valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.

                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the `model id` string of a pretrained
                      model).
                    - The model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded
                      by supplying the save directory.
                    - The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
                      configuration JSON file named `config.json` is found in the directory.
            state_dict (:obj:`Dict[str, torch.Tensor]`, `optional`):
                A state dictionary to use instead of a state dictionary loaded from saved weights file.

                This option can be used if you want to create a model from a pretrained configuration but load your own
                weights. In this case though, you should check if using
                :func:`~transformers.PreTrainedModel.save_pretrained` and
                :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.
            cache_dir (:obj:`Union[str, os.PathLike]`, `optional`):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_tf (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Load the model weights from a TensorFlow checkpoint save file (see docstring of
                ``pretrained_model_name_or_path`` argument).
            from_flax (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Load the model weights from a Flax checkpoint save file (see docstring of
                ``pretrained_model_name_or_path`` argument).
            force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
            proxies (:obj:`Dict[str, str]`, `optional`):
925
                A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to only look at local files (i.e., do not try to download the model).
            use_auth_token (:obj:`str` or `bool`, `optional`):
                The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
                generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
            revision (:obj:`str`, `optional`, defaults to :obj:`"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
                identifier allowed by git.
            mirror (:obj:`str`, `optional`, defaults to :obj:`None`):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
                Please refer to the mirror site for more information.
            kwargs (remaining dictionary of keyword arguments, `optional`):
                Can be used to update the configuration object (after it has been loaded) and initiate the model (e.g.,
                :obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
                automatically loaded:

                    - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
                      underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
                      initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
                      ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
                      with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
                      attribute will be passed to the underlying model's ``__init__`` function.

        .. note::

            Passing :obj:`use_auth_token=True` is required when you want to use a private model.

        .. note::

            Activate the special `"offline-mode"
            <https://huggingface.co/transformers/installation.html#offline-mode>`__ to use this method in a firewalled
            environment.

        Examples::

            >>> from transformers import BertConfig, BertModel
            >>> # Download model and configuration from huggingface.co and cache.
            >>> model = BertModel.from_pretrained('bert-base-uncased')
            >>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
            >>> model = BertModel.from_pretrained('./test/saved_model/')
            >>> # Update configuration during loading.
            >>> model = BertModel.from_pretrained('bert-base-uncased', output_attentions=True)
            >>> assert model.config.output_attentions == True
            >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
            >>> config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
            >>> model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
            >>> # Loading from a Flax checkpoint file instead of a PyTorch model (slower)
            >>> model = BertModel.from_pretrained('bert-base-uncased', from_flax=True)

        """
        config = kwargs.pop("config", None)
        state_dict = kwargs.pop("state_dict", None)
        cache_dir = kwargs.pop("cache_dir", None)
        from_tf = kwargs.pop("from_tf", False)
        from_flax = kwargs.pop("from_flax", False)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        output_loading_info = kwargs.pop("output_loading_info", False)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        mirror = kwargs.pop("mirror", None)
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)

        user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                *model_args,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                _from_auto=from_auto_class,
                _from_pipeline=from_pipeline,
                **kwargs,
            )
        else:
            model_kwargs = kwargs

        # Load model
        if pretrained_model_name_or_path is not None:
            pretrained_model_name_or_path = str(pretrained_model_name_or_path)
            if os.path.isdir(pretrained_model_name_or_path):
                if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
                    # Load from a TF 1.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
                elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
                    # Load from a TF 2.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
                elif from_flax and os.path.isfile(os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)):
                    # Load from a Flax checkpoint in priority if from_flax
                    archive_file = os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                    # Load from a PyTorch checkpoint
                    archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                else:
                    raise EnvironmentError(
                        f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + '.index', FLAX_WEIGHTS_NAME]} found in "
                        f"directory {pretrained_model_name_or_path} or `from_tf` and `from_flax` set to False."
                    )
            elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
                archive_file = pretrained_model_name_or_path
            elif os.path.isfile(pretrained_model_name_or_path + ".index"):
                if not from_tf:
                    raise ValueError(
                        f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set "
                        "from_tf to True to load from this checkpoint."
                    )
                archive_file = pretrained_model_name_or_path + ".index"
            else:
                # set correct filename
                if from_tf:
                    filename = TF2_WEIGHTS_NAME
                elif from_flax:
                    filename = FLAX_WEIGHTS_NAME
                else:
                    filename = WEIGHTS_NAME

                archive_file = hf_bucket_url(
                    pretrained_model_name_or_path,
                    filename=filename,
                    revision=revision,
                    mirror=mirror,
                )

            try:
                # Load from URL or cache if already cached
                resolved_archive_file = cached_path(
                    archive_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                )
            except EnvironmentError as err:
                logger.error(err)
                msg = (
                    f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
                    f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
                    f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n"
                )
                raise EnvironmentError(msg)

            if resolved_archive_file == archive_file:
                logger.info(f"loading weights file {archive_file}")
            else:
                logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
        else:
            resolved_archive_file = None

        config.name_or_path = pretrained_model_name_or_path

        # Instantiate model.

        if is_deepspeed_zero3_enabled():
            import deepspeed

            logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
            # this immediately partitions the model, avoiding the time and memory overhead of first copying it to CPU or to each GPU
            with deepspeed.zero.Init():
                model = cls(config, *model_args, **model_kwargs)
        else:
            model = cls(config, *model_args, **model_kwargs)

        if state_dict is None and not (from_tf or from_flax):
            try:
                state_dict = torch.load(resolved_archive_file, map_location="cpu")
            except Exception:
                raise OSError(
                    f"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' "
                    f"at '{resolved_archive_file}'. "
                    "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True."
                )

        missing_keys = []
        unexpected_keys = []
        error_msgs = []

        if from_tf:
            if resolved_archive_file.endswith(".index"):
                # Load from a TensorFlow 1.X checkpoint - provided by original authors
                model = cls.load_tf_weights(model, config, resolved_archive_file[:-6])  # Remove the '.index'
            else:
                # Load from our TensorFlow 2.0 checkpoints
                try:
                    from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model

                    model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
                except ImportError:
                    logger.error(
                        "Loading a TensorFlow model in PyTorch requires both PyTorch and TensorFlow to be installed. Please see "
                        "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
                    )
                    raise
        elif from_flax:
            try:
                from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model

                model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file)
            except ImportError:
                logger.error(
                    "Loading a Flax model in PyTorch, requires both PyTorch and Flax to be installed. Please see "
                    "https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions."
                )
                raise
        else:
            # Convert old format to new format if needed from a PyTorch state_dict
            old_keys = []
            new_keys = []
            for key in state_dict.keys():
                new_key = None
                if "gamma" in key:
                    new_key = key.replace("gamma", "weight")
                if "beta" in key:
                    new_key = key.replace("beta", "bias")
                if new_key:
                    old_keys.append(key)
                    new_keys.append(new_key)
            for old_key, new_key in zip(old_keys, new_keys):
                state_dict[new_key] = state_dict.pop(old_key)

            # copy state_dict so _load_from_state_dict can modify it
            metadata = getattr(state_dict, "_metadata", None)
            state_dict = state_dict.copy()
            if metadata is not None:
                state_dict._metadata = metadata

            # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
            # so we need to apply the function recursively.
            def load(module: nn.Module, prefix=""):
                local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
                args = (state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs)
                if is_deepspeed_zero3_enabled():
                    import deepspeed

                    # because zero3 puts placeholders in model params, this context
                    # manager gathers (unpartitions) the params of the current layer, then loads from
                    # the state dict and then re-partitions them again
                    with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
                        if torch.distributed.get_rank() == 0:
                            module._load_from_state_dict(*args)
                else:
                    module._load_from_state_dict(*args)

                for name, child in module._modules.items():
                    if child is not None:
                        load(child, prefix + name + ".")

            # Make sure we are able to load base models as well as derived models (with heads)
            start_prefix = ""
            model_to_load = model
            has_prefix_module = any(s.startswith(cls.base_model_prefix) for s in state_dict.keys())
            if not hasattr(model, cls.base_model_prefix) and has_prefix_module:
                start_prefix = cls.base_model_prefix + "."
            if hasattr(model, cls.base_model_prefix) and not has_prefix_module:
                model_to_load = getattr(model, cls.base_model_prefix)

            load(model_to_load, prefix=start_prefix)

            if model.__class__.__name__ != model_to_load.__class__.__name__:
                base_model_state_dict = model_to_load.state_dict().keys()
                head_model_state_dict_without_base_prefix = [
                    key.split(cls.base_model_prefix + ".")[-1] for key in model.state_dict().keys()
                ]
                missing_keys.extend(head_model_state_dict_without_base_prefix - base_model_state_dict)

            # Some models may have keys that are not in the state by design, removing them before needlessly warning
            # the user.
            if cls._keys_to_ignore_on_load_missing is not None:
                for pat in cls._keys_to_ignore_on_load_missing:
                    missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

            if cls._keys_to_ignore_on_load_unexpected is not None:
                for pat in cls._keys_to_ignore_on_load_unexpected:
                    unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

            if len(unexpected_keys) > 0:
                logger.warning(
                    f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
                    f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
                    f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
                    f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
                    f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
                    f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
                )
            else:
                logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
            if len(missing_keys) > 0:
                logger.warning(
                    f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                    f"and are newly initialized: {missing_keys}\n"
                    f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
                )
            else:
                logger.info(
                    f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
                    f"If your task is similar to the task the model of the checkpoint was trained on, "
                    f"you can already use {model.__class__.__name__} for predictions without further training."
                )
            if len(error_msgs) > 0:
                error_msg = "\n\t".join(error_msgs)
                raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")
        # make sure token embedding weights are still tied if needed
        model.tie_weights()

        # Set model in evaluation mode to deactivate DropOut modules by default
        model.eval()

        if output_loading_info:
            loading_info = {
                "missing_keys": missing_keys,
                "unexpected_keys": unexpected_keys,
                "error_msgs": error_msgs,
            }
            return model, loading_info

        return model


class Conv1D(nn.Module):
    """
    1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).

    Basically works like a linear layer but the weights are transposed.

    Args:
        nf (:obj:`int`): The number of output features.
        nx (:obj:`int`): The number of input features.
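
    Example (a minimal, illustrative sketch; the shapes below are arbitrary)::

        >>> import torch
        >>> conv = Conv1D(nf=12, nx=4)  # maps 4 input features to 12 output features
        >>> x = torch.randn(2, 3, 4)    # (batch_size, seq_len, nx)
        >>> conv(x).shape
        torch.Size([2, 3, 12])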
    """

    def __init__(self, nf, nx):
        super().__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = nn.Parameter(w)
        self.bias = nn.Parameter(torch.zeros(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(*size_out)
        return x


class PoolerStartLogits(nn.Module):
    """
    Compute SQuAD start logits from sequence hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, 1)

    def forward(
        self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means token
                should be masked.

        Returns:
            :obj:`torch.FloatTensor`: The start logits for SQuAD.
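
        Example (an illustrative sketch; :obj:`PretrainedConfig` stores extra keyword arguments as attributes, so the
        hypothetical ``hidden_size=8`` below is enough to build the layer)::

            >>> import torch
            >>> from transformers import PretrainedConfig
            >>> pooler = PoolerStartLogits(PretrainedConfig(hidden_size=8))
            >>> pooler(torch.randn(2, 5, 8)).shape
            torch.Size([2, 5])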
        """
        x = self.dense(hidden_states).squeeze(-1)

        if p_mask is not None:
            if get_parameter_dtype(self) == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask

        return x


class PoolerEndLogits(nn.Module):
    """
    Compute SQuAD end logits from sequence hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
            :obj:`layer_norm_eps` to use.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dense_1 = nn.Linear(config.hidden_size, 1)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_states: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        p_mask: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
                The hidden states of the first tokens for the labeled span.
            start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                The position of the first token for the labeled span.
            p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means token
                should be masked.

        .. note::

            One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
            ``start_positions`` overrides ``start_states``.

        Returns:
            :obj:`torch.FloatTensor`: The end logits for SQuAD.
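
        Example (an illustrative sketch with a hypothetical config; ``start_positions`` selects the start hidden
        states used to condition the end logits)::

            >>> import torch
            >>> from transformers import PretrainedConfig
            >>> pooler = PoolerEndLogits(PretrainedConfig(hidden_size=8, layer_norm_eps=1e-12))
            >>> hidden_states = torch.randn(2, 5, 8)
            >>> pooler(hidden_states, start_positions=torch.tensor([0, 3])).shape
            torch.Size([2, 5])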
        """
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
        if start_positions is not None:
            slen, hsz = hidden_states.shape[-2:]
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions)  # shape (bsz, 1, hsz)
            start_states = start_states.expand(-1, slen, -1)  # shape (bsz, slen, hsz)

        x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
        x = self.activation(x)
        x = self.LayerNorm(x)
        x = self.dense_1(x).squeeze(-1)

        if p_mask is not None:
            if get_parameter_dtype(self) == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask

        return x


class PoolerAnswerClass(nn.Module):
    """
    Compute SQuAD 2.0 answer class from classification and start tokens hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model.
    """

    def __init__(self, config):
        super().__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_states: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        cls_index: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            start_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`, `optional`):
                The hidden states of the first tokens for the labeled span.
            start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                The position of the first token for the labeled span.
            cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.

        .. note::

            One of ``start_states`` or ``start_positions`` should not be :obj:`None`. If both are set,
            ``start_positions`` overrides ``start_states``.

        Returns:
            :obj:`torch.FloatTensor`: The SQuAD 2.0 answer class.
        """
        # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample.
        hsz = hidden_states.shape[-1]
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
        if start_positions is not None:
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions).squeeze(-2)  # shape (bsz, hsz)

        if cls_index is not None:
            cls_index = cls_index[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, hsz)
        else:
            cls_token_state = hidden_states[:, -1, :]  # shape (bsz, hsz)

        x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
        x = self.activation(x)
        x = self.dense_1(x).squeeze(-1)

        return x


@dataclass
class SquadHeadOutput(ModelOutput):
    """
    Base class for outputs of question answering models using a :class:`~transformers.modeling_utils.SQuADHead`.

    Args:
        loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided):
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification
            losses.
        start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top ``config.start_n_top`` start token possibilities (beam-search).
        start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top ``config.start_n_top`` start token possibilities (beam-search).
        end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities
            (beam-search).
        end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided):
            Log probabilities for the ``is_impossible`` label of the answers.

    """

    loss: Optional[torch.FloatTensor] = None
    start_top_log_probs: Optional[torch.FloatTensor] = None
    start_top_index: Optional[torch.LongTensor] = None
    end_top_log_probs: Optional[torch.FloatTensor] = None
    end_top_index: Optional[torch.LongTensor] = None
    cls_logits: Optional[torch.FloatTensor] = None


class SQuADHead(nn.Module):
    r"""
    A SQuAD head inspired by XLNet.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model, will be used to grab the :obj:`hidden_size` of the model and the
            :obj:`layer_norm_eps` to use.
    """

    def __init__(self, config):
        super().__init__()
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top

        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)

    @replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig)
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        cls_index: Optional[torch.LongTensor] = None,
        is_impossible: Optional[torch.LongTensor] = None,
        p_mask: Optional[torch.FloatTensor] = None,
        return_dict: bool = False,
    ) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]:
        """
        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len, hidden_size)`):
                Final hidden states of the model on the sequence tokens.
            start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Positions of the first token for the labeled span.
            end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Positions of the last token for the labeled span.
            cls_index (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Position of the CLS token for each sentence in the batch. If :obj:`None`, takes the last token.
            is_impossible (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`):
                Whether the question has a possible answer in the paragraph or not.
            p_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_len)`, `optional`):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means token
                should be masked.
            return_dict (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
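
        Example (a minimal, illustrative sketch of the inference path, using a hypothetical config;
        :obj:`PretrainedConfig` stores the extra keyword arguments below as attributes)::

            >>> import torch
            >>> from transformers import PretrainedConfig
            >>> config = PretrainedConfig(hidden_size=8, layer_norm_eps=1e-12, start_n_top=2, end_n_top=2)
            >>> head = SQuADHead(config)
            >>> outputs = head(torch.randn(1, 5, 8), return_dict=True)
            >>> outputs.start_top_log_probs.shape
            torch.Size([1, 2])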

        Returns:
        """
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)

            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)

                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5

            return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,)

        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            start_log_probs = F.softmax(start_logits, dim=-1)  # shape (bsz, slen)

            start_top_log_probs, start_top_index = torch.topk(
                start_log_probs, self.start_n_top, dim=-1
            )  # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz)  # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp)  # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1)  # shape (bsz, slen, start_n_top, hsz)

            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
                start_states
            )  # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = F.softmax(end_logits, dim=1)  # shape (bsz, slen, start_n_top)

            end_top_log_probs, end_top_index = torch.topk(
                end_log_probs, self.end_n_top, dim=1
            )  # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)

            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)

            if not return_dict:
                return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
            else:
                return SquadHeadOutput(
                    start_top_log_probs=start_top_log_probs,
                    start_top_index=start_top_index,
                    end_top_log_probs=end_top_log_probs,
                    end_top_index=end_top_index,
                    cls_logits=cls_logits,
                )


class SequenceSummary(nn.Module):
    r"""
    Compute a single vector summary of a sequence hidden states.

    Args:
        config (:class:`~transformers.PretrainedConfig`):
            The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
            config class of your model for the default values it uses):

            - **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:

                - :obj:`"last"` -- Take the last token hidden state (like XLNet)
                - :obj:`"first"` -- Take the first token hidden state (like Bert)
                - :obj:`"mean"` -- Take the mean of all tokens hidden states
                - :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
                - :obj:`"attn"` -- Not implemented now, use multi-head attention

            - **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
            - **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
              :obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
            - **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
              output, another string or :obj:`None` will add no activation.
            - **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
              activation.
            - **summary_last_dropout** (:obj:`float`) -- Optional dropout probability after the projection and
              activation.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()

        self.summary_type = getattr(config, "summary_type", "last")
        if self.summary_type == "attn":
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

        self.summary = Identity()
        if hasattr(config, "summary_use_proj") and config.summary_use_proj:
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = nn.Linear(config.hidden_size, num_classes)

        activation_string = getattr(config, "summary_activation", None)
        self.activation: Callable = get_activation(activation_string) if activation_string else Identity()

        self.first_dropout = Identity()
        if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
            self.first_dropout = nn.Dropout(config.summary_first_dropout)

        self.last_dropout = Identity()
        if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
            self.last_dropout = nn.Dropout(config.summary_last_dropout)

    def forward(
        self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
    ) -> torch.FloatTensor:
        """
        Compute a single vector summary of a sequence hidden states.

        Args:
            hidden_states (:obj:`torch.FloatTensor` of shape :obj:`[batch_size, seq_len, hidden_size]`):
                The hidden states of the last layer.
            cls_index (:obj:`torch.LongTensor` of shape :obj:`[batch_size]` or :obj:`[batch_size, ...]` where ... are optional leading dimensions of :obj:`hidden_states`, `optional`):
                Used if :obj:`summary_type == "cls_index"` and takes the last token of the sequence as classification
                token.

        Returns:
            :obj:`torch.FloatTensor`: The summary of the sequence hidden states.
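
        Example (an illustrative sketch; with a hypothetical config using ``summary_type="mean"`` and no projection,
        the sequence dimension is simply averaged out)::

            >>> import torch
            >>> from transformers import PretrainedConfig
            >>> summary = SequenceSummary(PretrainedConfig(summary_type="mean", hidden_size=8))
            >>> summary(torch.randn(2, 5, 8)).shape
            torch.Size([2, 8])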
        """
        if self.summary_type == "last":
            output = hidden_states[:, -1]
        elif self.summary_type == "first":
            output = hidden_states[:, 0]
        elif self.summary_type == "mean":
            output = hidden_states.mean(dim=1)
        elif self.summary_type == "cls_index":
            if cls_index is None:
                cls_index = torch.full_like(
                    hidden_states[..., :1, :],
                    hidden_states.shape[-2] - 1,
                    dtype=torch.long,
                )
            else:
                cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
                cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
            # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
            output = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, XX, hidden_size)
        elif self.summary_type == "attn":
            raise NotImplementedError

        output = self.first_dropout(output)
        output = self.summary(output)
        output = self.activation(output)
        output = self.last_dropout(output)

        return output


def unwrap_model(model: torch.nn.Module) -> torch.nn.Module:
    """
    Recursively unwraps a model from potential containers (as used in distributed training).

    Args:
        model (:obj:`torch.nn.Module`): The model to unwrap.
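
    Example (illustrative; :obj:`torch.nn.DataParallel` stores the wrapped model under its ``module`` attribute)::

        >>> import torch
        >>> model = torch.nn.Linear(2, 2)
        >>> unwrap_model(torch.nn.DataParallel(model)) is model
        True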
    """
    # since there could be multiple levels of wrapping, unwrap recursively
    if hasattr(model, "module"):
        return unwrap_model(model.module)
    else:
        return model


def prune_linear_layer(layer: torch.nn.Linear, index: torch.LongTensor, dim: int = 0) -> torch.nn.Linear:
    """
    Prune a linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (:obj:`torch.nn.Linear`): The layer to prune.
        index (:obj:`torch.LongTensor`): The indices to keep in the layer.
        dim (:obj:`int`, `optional`, defaults to 0): The dimension on which to keep the indices.

    Returns:
        :obj:`torch.nn.Linear`: The pruned layer as a new layer with :obj:`requires_grad=True`.
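
    Example (a minimal sketch: keep output units 0 and 2 of a 4-unit layer)::

        >>> import torch
        >>> from torch import nn
        >>> layer = nn.Linear(3, 4)
        >>> pruned = prune_linear_layer(layer, torch.tensor([0, 2]), dim=0)
        >>> pruned.weight.shape
        torch.Size([2, 3])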
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if layer.bias is not None:
        if dim == 1:
            b = layer.bias.clone().detach()
        else:
            b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    if layer.bias is not None:
        new_layer.bias.requires_grad = False
        new_layer.bias.copy_(b.contiguous())
        new_layer.bias.requires_grad = True
    return new_layer


def prune_conv1d_layer(layer: Conv1D, index: torch.LongTensor, dim: int = 1) -> Conv1D:
    """
    Prune a Conv1D layer to keep only entries in index. A Conv1D work as a Linear layer (see e.g. BERT) but the weights
    are transposed.

    Used to remove heads.

    Args:
        layer (:class:`~transformers.modeling_utils.Conv1D`): The layer to prune.
        index (:obj:`torch.LongTensor`): The indices to keep in the layer.
        dim (:obj:`int`, `optional`, defaults to 1): The dimension on which to keep the indices.

    Returns:
        :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with :obj:`requires_grad=True`.
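
    Example (a minimal sketch; a :obj:`Conv1D` stores its weight as ``(nx, nf)``, so output features are pruned along
    ``dim=1``)::

        >>> import torch
        >>> layer = Conv1D(nf=6, nx=4)  # weight shape (4, 6)
        >>> pruned = prune_conv1d_layer(layer, torch.tensor([0, 1, 2]), dim=1)
        >>> pruned.weight.shape
        torch.Size([4, 3])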
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if dim == 0:
        b = layer.bias.clone().detach()
    else:
        b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    new_layer.bias.requires_grad = False
    new_layer.bias.copy_(b.contiguous())
    new_layer.bias.requires_grad = True
    return new_layer


def prune_layer(
    layer: Union[torch.nn.Linear, Conv1D], index: torch.LongTensor, dim: Optional[int] = None
) -> Union[torch.nn.Linear, Conv1D]:
    """
    Prune a Conv1D or linear layer to keep only entries in index.

    Used to remove heads.

    Args:
        layer (:obj:`Union[torch.nn.Linear, Conv1D]`): The layer to prune.
        index (:obj:`torch.LongTensor`): The indices to keep in the layer.
        dim (:obj:`int`, `optional`): The dimension on which to keep the indices.

    Returns:
        :obj:`torch.nn.Linear` or :class:`~transformers.modeling_utils.Conv1D`: The pruned layer as a new layer with
        :obj:`requires_grad=True`.
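
    Example (illustrative; the default ``dim`` is chosen to match the layer type, here ``dim=0`` for
    :obj:`torch.nn.Linear`)::

        >>> import torch
        >>> from torch import nn
        >>> prune_layer(nn.Linear(3, 4), torch.tensor([0, 2])).weight.shape
        torch.Size([2, 3])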
    """
    if isinstance(layer, nn.Linear):
        return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
    elif isinstance(layer, Conv1D):
        return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
    else:
        raise ValueError(f"Can't prune layer of class {layer.__class__}")


def apply_chunking_to_forward(
    forward_fn: Callable[..., torch.Tensor], chunk_size: int, chunk_dim: int, *input_tensors
) -> torch.Tensor:
    """
    This function chunks the :obj:`input_tensors` into smaller input tensor parts of size :obj:`chunk_size` over the
    dimension :obj:`chunk_dim`. It then applies a layer :obj:`forward_fn` to each chunk independently to save memory.

    If the :obj:`forward_fn` is independent across the :obj:`chunk_dim` this function will yield the same result as
    directly applying :obj:`forward_fn` to :obj:`input_tensors`.

    Args:
        forward_fn (:obj:`Callable[..., torch.Tensor]`):
            The forward function of the model.
        chunk_size (:obj:`int`):
            The chunk size of a chunked tensor: :obj:`num_chunks = len(input_tensors[0]) / chunk_size`.
        chunk_dim (:obj:`int`):
            The dimension over which the :obj:`input_tensors` should be chunked.
        input_tensors (:obj:`Tuple[torch.Tensor]`):
            The input tensors of ``forward_fn`` which will be chunked.

    Returns:
        :obj:`torch.Tensor`: A tensor with the same shape as the one :obj:`forward_fn` would have given if applied directly to :obj:`input_tensors`.


    Examples::

        # rename the usual forward() fn to forward_chunk()
        def forward_chunk(self, hidden_states):
            hidden_states = self.decoder(hidden_states)
            return hidden_states

        # implement a chunked forward function
        def forward(self, hidden_states):
            return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states)
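
        # a self-contained sketch (hypothetical shapes): chunk a sequence of length 4 into two chunks of size 2;
        # since the function is independent across chunk_dim, the result equals applying it to the full tensor
        hidden_states = torch.randn(1, 4, 8)
        output = apply_chunking_to_forward(lambda x: x * 2, 2, 1, hidden_states)
        assert output.shape == (1, 4, 8)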
    """

    assert len(input_tensors) > 0, f"{input_tensors} has to be a tuple/list of tensors"
    tensor_shape = input_tensors[0].shape[chunk_dim]
    assert all(
        input_tensor.shape[chunk_dim] == tensor_shape for input_tensor in input_tensors
    ), "All input tenors have to be of the same shape"

    # inspect.signature exists since python 3.5 and is a python method -> no problem with backward compatibility
    num_args_in_forward_chunk_fn = len(inspect.signature(forward_fn).parameters)
    if num_args_in_forward_chunk_fn != len(input_tensors):
        raise ValueError(
            f"forward_chunk_fn expects {num_args_in_forward_chunk_fn} arguments, but only {len(input_tensors)} input "
            "tensors are given"
        )

    if chunk_size > 0:
        if input_tensors[0].shape[chunk_dim] % chunk_size != 0:
            raise ValueError(
                f"The dimension to be chunked {input_tensors[0].shape[chunk_dim]} has to be a multiple of the chunk "
                f"size {chunk_size}"
            )

        num_chunks = input_tensors[0].shape[chunk_dim] // chunk_size

        # chunk input tensor into tuples
        input_tensors_chunks = tuple(input_tensor.chunk(num_chunks, dim=chunk_dim) for input_tensor in input_tensors)
        # apply forward fn to every tuple
        output_chunks = tuple(forward_fn(*input_tensors_chunk) for input_tensors_chunk in zip(*input_tensors_chunks))
        # concatenate output at same dimension
        return torch.cat(output_chunks, dim=chunk_dim)

    return forward_fn(*input_tensors)