"...models/git@developer.sourcefind.cn:Wenxuan/LightX2V.git" did not exist on "bc7c3e8700df9239effdcbf8ee5c69b49ba94e58"
modeling_utils.py 67.6 KB
Newer Older
1
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import logging
import os

import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from torch.nn import functional as F

from .configuration_utils import PretrainedConfig
from .file_utils import (
    DUMMY_INPUTS,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    WEIGHTS_NAME,
    cached_path,
    hf_bucket_url,
    is_remote_url,
)


logger = logging.getLogger(__name__)

try:
    from torch.nn import Identity
except ImportError:
    # Older PyTorch compatibility
    class Identity(nn.Module):
        r"""A placeholder identity operator that is argument-insensitive.
        """

        def __init__(self, *args, **kwargs):
            super(Identity, self).__init__()

        def forward(self, input):
            return input


class PreTrainedModel(nn.Module):
    r""" Base class for all models.

        :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models
        as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention layers.

        Class attributes (overridden by derived classes):
            - ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
            - ``pretrained_model_archive_map``: a python ``dict`` with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.
            - ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:

                - ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,
                - ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,
                - ``path``: a path (string) to the TensorFlow checkpoint.

            - ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
    """
    config_class = None
    pretrained_model_archive_map = {}
    base_model_prefix = ""

    @property
    def dummy_inputs(self):
        """ Dummy inputs to do a forward pass in the network.

        Returns:
            torch.Tensor with dummy inputs
        """
        return {"input_ids": torch.tensor(DUMMY_INPUTS)}

    def __init__(self, config, *inputs, **kwargs):
        super(PreTrainedModel, self).__init__()
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. "
                "To create a model from a pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                )
            )
        # Save config in model
        self.config = config

    @property
    def base_model(self):
        return getattr(self, self.base_model_prefix, self)

    def get_input_embeddings(self):
        """ Get model's input embeddings
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            return base_model.get_input_embeddings()
        else:
            raise NotImplementedError

    def set_input_embeddings(self, value):
        """ Set model's input embeddings
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            base_model.set_input_embeddings(value)
        else:
            raise NotImplementedError

    def get_output_embeddings(self):
        """ Get model's output embeddings
            Return None if the model doesn't have output embeddings
        """
        return None  # Overwrite for models with output embeddings

    def tie_weights(self):
        """ Make sure we are sharing the input and output embeddings.
            Export to TorchScript can't handle parameter sharing so we are cloning them instead.
        """
        output_embeddings = self.get_output_embeddings()
        if output_embeddings is not None:
            self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())

    def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
        """ Tie or clone module weights, depending on whether we are using TorchScript or not.
        """
        if self.config.torchscript:
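            # TorchScript does not support shared parameters between modules,
            # so we copy the input embedding weights instead of tying them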
            output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
        else:
            output_embeddings.weight = input_embeddings.weight

        if hasattr(output_embeddings, "bias") and output_embeddings.bias is not None:
            output_embeddings.bias.data = torch.nn.functional.pad(
                output_embeddings.bias.data,
                (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]),
                "constant",
                0,
            )
        if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
            output_embeddings.out_features = input_embeddings.num_embeddings

    def resize_token_embeddings(self, new_num_tokens=None):
        """ Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.
        Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.

        Arguments:

            new_num_tokens: (`optional`) int:
                New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. Reducing the size will remove vectors from the end.
                If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model.

        Return: ``torch.nn.Embeddings``
            Pointer to the input tokens Embeddings Module of the model
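
        Example (an illustrative sketch; any model/tokenizer pair from this library works similarly)::

            tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
            model = BertModel.from_pretrained('bert-base-uncased')
            tokenizer.add_tokens(['new_tok1', 'new_tok2'])
            # resize the embedding matrix to match the enlarged vocabulary
            model.resize_token_embeddings(len(tokenizer))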
        """
        base_model = getattr(self, self.base_model_prefix, self)  # get the base model if needed
        model_embeds = base_model._resize_token_embeddings(new_num_tokens)
        if new_num_tokens is None:
            return model_embeds

        # Update base model and current model config
        self.config.vocab_size = new_num_tokens
        base_model.vocab_size = new_num_tokens

        # Tie weights again if needed
        self.tie_weights()

        return model_embeds

    def _resize_token_embeddings(self, new_num_tokens):
        old_embeddings = self.get_input_embeddings()
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.set_input_embeddings(new_embeddings)
        return self.get_input_embeddings()

    def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):
        """ Build a resized Embedding Module from a provided token Embedding Module.
            Increasing the size will add newly initialized vectors at the end
            Reducing the size will remove vectors from the end

        Args:
            new_num_tokens: (`optional`) int
                New number of tokens in the embedding matrix.
                Increasing the size will add newly initialized vectors at the end
                Reducing the size will remove vectors from the end
                If not provided or None: return the provided token Embedding Module.
        Return: ``torch.nn.Embeddings``
            Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None
        """
        if new_num_tokens is None:
            return old_embeddings

        old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
        if old_num_tokens == new_num_tokens:
            return old_embeddings

        # Build new embeddings
        new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
        new_embeddings.to(old_embeddings.weight.device)

        # initialize all new embeddings (in particular added tokens)
        self._init_weights(new_embeddings)

        # Copy word embeddings from the previous weights
        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
        new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]

        return new_embeddings

    def init_weights(self):
        """ Initialize and prunes weights if needed. """
        # Initialize weights
        self.apply(self._init_weights)

        # Prune heads if needed
        if self.config.pruned_heads:
            self.prune_heads(self.config.pruned_heads)

        # Tie weights if needed
        self.tie_weights()

    def prune_heads(self, heads_to_prune):
        """ Prunes heads of the base model.

            Arguments:

                heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).
                E.g. {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
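
            Example (an illustrative sketch)::

                # prune heads 0 and 2 on layer 1, and head 3 on layer 2
                model.prune_heads({1: [0, 2], 2: [3]})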
        """
        # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
        for layer, heads in heads_to_prune.items():
            union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
            self.config.pruned_heads[layer] = list(union_heads)  # Unfortunately we have to store it as list for JSON

        self.base_model._prune_heads(heads_to_prune)

    def save_pretrained(self, save_directory):
        """ Save a model and its configuration file to a directory, so that it
            can be re-loaded using the :func:`~transformers.PreTrainedModel.from_pretrained` class method.
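
            Example (an illustrative sketch; ``BertModel`` stands in for any model class)::

                model.save_pretrained('./my_model_directory/')
                model = BertModel.from_pretrained('./my_model_directory/')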
        """
        assert os.path.isdir(
            save_directory
        ), "Saving path should be a directory where the model and configuration can be saved"

        # Only save the model itself if we are using distributed training
        model_to_save = self.module if hasattr(self, "module") else self

        # Save configuration file
        model_to_save.config.save_pretrained(save_directory)

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(save_directory, WEIGHTS_NAME)
        torch.save(model_to_save.state_dict(), output_model_file)
        logger.info("Model weights saved in {}".format(output_model_file))

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        r"""Instantiate a pretrained pytorch model from a pre-trained model configuration.

        The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated).
        To train the model, you should first set it back in training mode with ``model.train()``.

        The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.
        It is up to you to train those weights with a downstream fine-tuning task.

        The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.

        Parameters:
            pretrained_model_name_or_path: either:

                - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.
                - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.
                - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.
                - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
                - None if you are both providing the configuration and state dictionary (resp. with keyword arguments ``config`` and ``state_dict``)

            model_args: (`optional`) Sequence of positional arguments:
                All remaining positional arguments will be passed to the underlying model's ``__init__`` method

            config: (`optional`) one of:
                    - an instance of a class derived from :class:`~transformers.PretrainedConfig`, or
                    - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can be automatically loaded when:

                - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or
                - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by supplying the save directory.
                - the model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.

            state_dict: (`optional`) dict:
                an optional state dictionary for the model to use instead of a state dictionary loaded from saved weights file.
                This option can be used if you want to create a model from a pretrained configuration but load your own weights.
                In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.

            cache_dir: (`optional`) string:
                Path to a directory in which a downloaded pre-trained model
                configuration should be cached if the standard cache should not be used.

            force_download: (`optional`) boolean, default False:
                Force to (re-)download the model weights and configuration files and override the cached versions if they exist.

            resume_download: (`optional`) boolean, default False:
                Do not delete an incompletely received file. Attempt to resume the download if such a file exists.

            proxies: (`optional`) dict, default None:
                A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
                The proxies are used on each request.

            output_loading_info: (`optional`) boolean:
                Set to ``True`` to also return a dictionary containing missing keys, unexpected keys and error messages.

            kwargs: (`optional`) Remaining dictionary of keyword arguments:
                Can be used to update the configuration object (after it has been loaded) and to initialize the model (e.g. ``output_attention=True``). Behaves differently depending on whether a `config` is provided or automatically loaded:

                - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)
                - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.

        Examples::

            model = BertModel.from_pretrained('bert-base-uncased')    # Download model and configuration from S3 and cache.
            model = BertModel.from_pretrained('./test/saved_model/')  # E.g. model was saved using `save_pretrained('./test/saved_model/')`
            model = BertModel.from_pretrained('bert-base-uncased', output_attention=True)  # Update configuration during loading
            assert model.config.output_attention == True
            # Loading from a TF checkpoint file instead of a PyTorch model (slower)
            config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')
            model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)
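            # also retrieve loading info (missing and unexpected keys)
            model, loading_info = BertModel.from_pretrained('bert-base-uncased', output_loading_info=True)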

        """
        config = kwargs.pop("config", None)
        state_dict = kwargs.pop("state_dict", None)
        cache_dir = kwargs.pop("cache_dir", None)
        from_tf = kwargs.pop("from_tf", False)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        output_loading_info = kwargs.pop("output_loading_info", False)

        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                *model_args,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                **kwargs
            )
        else:
            model_kwargs = kwargs

        # Load model
        if pretrained_model_name_or_path is not None:
            if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
                archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]
            elif os.path.isdir(pretrained_model_name_or_path):
                if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
                    # Load from a TF 1.0 checkpoint
                    archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
                elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
                    # Load from a TF 2.0 checkpoint
                    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                    # Load from a PyTorch checkpoint
                    archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                else:
                    raise EnvironmentError(
                        "Error no file named {} found in directory {} or `from_tf` set to False".format(
                            [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"], pretrained_model_name_or_path
                        )
                    )
            elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
                archive_file = pretrained_model_name_or_path
            elif os.path.isfile(pretrained_model_name_or_path + ".index"):
                assert (
                    from_tf
                ), "We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint".format(
                    pretrained_model_name_or_path + ".index"
                )
                archive_file = pretrained_model_name_or_path + ".index"
            else:
                archive_file = hf_bucket_url(pretrained_model_name_or_path, postfix=WEIGHTS_NAME)
                if from_tf:
                    raise EnvironmentError(
                        "Loading a PyTorch model from a TF checkpoint is not supported when using a model identifier name."
                    )

            # redirect to the cache, if necessary
            try:
                resolved_archive_file = cached_path(
                    archive_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                )
            except EnvironmentError:
                if pretrained_model_name_or_path in cls.pretrained_model_archive_map:
                    msg = "Couldn't reach server at '{}' to download pretrained weights.".format(archive_file)
                else:
                    msg = (
                        "Model name '{}' was not found in model name list ({}). "
                        "We assumed '{}' was a path or url to model weight files named one of {} but "
                        "couldn't find any such file at this path or url.".format(
                            pretrained_model_name_or_path,
                            ", ".join(cls.pretrained_model_archive_map.keys()),
                            archive_file,
                            [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME],
                        )
                    )
                raise EnvironmentError(msg)

            if resolved_archive_file == archive_file:
                logger.info("loading weights file {}".format(archive_file))
            else:
                logger.info("loading weights file {} from cache at {}".format(archive_file, resolved_archive_file))
        else:
            resolved_archive_file = None

        # Instantiate model.
        model = cls(config, *model_args, **model_kwargs)

        if state_dict is None and not from_tf:
            try:
                state_dict = torch.load(resolved_archive_file, map_location="cpu")
            except Exception:
                raise OSError(
                    "Unable to load weights from pytorch checkpoint file. "
                    "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
                )

        missing_keys = []
        unexpected_keys = []
        error_msgs = []

        if from_tf:
            if resolved_archive_file.endswith(".index"):
                # Load from a TensorFlow 1.X checkpoint - provided by original authors
                model = cls.load_tf_weights(model, config, resolved_archive_file[:-6])  # Remove the '.index'
            else:
                # Load from our TensorFlow 2.0 checkpoints
                try:
                    from transformers import load_tf2_checkpoint_in_pytorch_model

                    model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
                except ImportError as e:
                    logger.error(
                        "Loading a TensorFlow model in PyTorch requires both PyTorch and TensorFlow to be installed. Please see "
                        "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
                    )
                    raise e
        else:
            # Convert old format to new format if needed from a PyTorch state_dict
            old_keys = []
            new_keys = []
            for key in state_dict.keys():
                new_key = None
                if "gamma" in key:
                    new_key = key.replace("gamma", "weight")
                if "beta" in key:
                    new_key = key.replace("beta", "bias")
                if new_key:
                    old_keys.append(key)
                    new_keys.append(new_key)
            for old_key, new_key in zip(old_keys, new_keys):
                state_dict[new_key] = state_dict.pop(old_key)

            # copy state_dict so _load_from_state_dict can modify it
            metadata = getattr(state_dict, "_metadata", None)
            state_dict = state_dict.copy()
            if metadata is not None:
                state_dict._metadata = metadata

            # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
            # so we need to apply the function recursively.
            def load(module, prefix=""):
                local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
                module._load_from_state_dict(
                    state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs
                )
                for name, child in module._modules.items():
                    if child is not None:
                        load(child, prefix + name + ".")

            # Make sure we are able to load base models as well as derived models (with heads)
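            # Two cases are handled here:
            # - the checkpoint comes from a model with a head but we are loading a bare
            #   base model: prepend the base model prefix when matching keys;
            # - the checkpoint comes from a bare base model but we are loading a model
            #   with a head: load the weights into the base model submodule only.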
            start_prefix = ""
            model_to_load = model
            if not hasattr(model, cls.base_model_prefix) and any(
                s.startswith(cls.base_model_prefix) for s in state_dict.keys()
            ):
                start_prefix = cls.base_model_prefix + "."
            if hasattr(model, cls.base_model_prefix) and not any(
                s.startswith(cls.base_model_prefix) for s in state_dict.keys()
            ):
                model_to_load = getattr(model, cls.base_model_prefix)

            load(model_to_load, prefix=start_prefix)
            if len(missing_keys) > 0:
                logger.info(
                    "Weights of {} not initialized from pretrained model: {}".format(
                        model.__class__.__name__, missing_keys
                    )
                )
            if len(unexpected_keys) > 0:
                logger.info(
                    "Weights from pretrained model not used in {}: {}".format(
                        model.__class__.__name__, unexpected_keys
                    )
                )
            if len(error_msgs) > 0:
                raise RuntimeError(
                    "Error(s) in loading state_dict for {}:\n\t{}".format(
                        model.__class__.__name__, "\n\t".join(error_msgs)
                    )
                )

        model.tie_weights()  # make sure word embedding weights are still tied if needed

        # Set model in evaluation mode to deactivate Dropout modules by default
        model.eval()

        if output_loading_info:
            loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys, "error_msgs": error_msgs}
            return model, loading_info

        return model

    def prepare_inputs_for_generation(self, input_ids, **kwargs):
        return {"input_ids": input_ids}

    @torch.no_grad()
    def generate(
        self,
        input_ids=None,
        max_length=None,
        do_sample=None,
        num_beams=None,
        temperature=None,
        top_k=None,
        top_p=None,
        repetition_penalty=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_ids=None,
        length_penalty=None,
        num_return_sequences=None,
    ):
        """ Sequence generator for models with a LM head.

        The method currently supports greedy or penalized greedy decoding, sampling with top-k or nucleus sampling
        and beam-search.
        Adapted in part from Facebook's XLM beam search code: https://github.com/facebookresearch/XLM

        Params:
            **input_ids**: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`
                The sequence used as a prompt for the generation. If `None` the method initializes
                it as an empty `torch.LongTensor` of shape `(1,)`.
            **max_length**: (`optional`) int
                The max length of the sequence to be generated. Between 1 and infinity. Defaults to 20.
            **do_sample**: (`optional`) bool
                If set to `False`, greedy decoding is used; otherwise sampling. Defaults to greedy decoding.
            **num_beams**: (`optional`) int
                Number of beams for beam search. 1 means no beam search. Defaults to 1.
            **temperature**: (`optional`) float
                The value used to modulate the next token probabilities.
            **top_k**: (`optional`) int
                The number of highest probability vocabulary tokens to keep for top-k filtering. Between 1 and infinity. Defaults to 50.
            **top_p**: (`optional`) float
                The cumulative probability of the highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Defaults to 1.
            **repetition_penalty**: (`optional`) float
                The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Defaults to 1.0.
            **bos_token_id**: (`optional`) int
                Beginning of sequence token to use if no prompt is provided. Defaults to 0.
            **pad_token_id**: (`optional`) int
                Token used to pad finished sequences. Defaults to 0.
            **eos_token_ids**: (`optional`) int or list of int
                End of sequence token or list of tokens to stop the generation. Defaults to 0.
            **length_penalty**: (`optional`) float
                Exponential penalty to the length. Defaults to 1.
            **num_return_sequences**: (`optional`) int
                The number of independently computed returned sequences for each element in the batch. Defaults to 1.
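
        Example (an illustrative sketch, using GPT-2 classes from this library)::

            tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
            model = GPT2LMHeadModel.from_pretrained('gpt2')
            input_ids = torch.tensor(tokenizer.encode("The weather is")).unsqueeze(0)
            output = model.generate(input_ids, max_length=20, do_sample=True, top_k=50)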
        """

        # We cannot generate if the model does not have a LM head
        if self.get_output_embeddings() is None:
            raise AttributeError(
                "You tried to generate sequences with a model that does not have a LM Head."
                "Please use another model class (e.g. `OpenAIGPTLMHeadModel`)"
            )
        max_length = max_length if max_length is not None else self.config.max_length
        do_sample = do_sample if do_sample is not None else self.config.do_sample
        num_beams = num_beams if num_beams is not None else self.config.num_beams
        temperature = temperature if temperature is not None else self.config.temperature
        top_k = top_k if top_k is not None else self.config.top_k
        top_p = top_p if top_p is not None else self.config.top_p
        repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty
        bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id
        pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id
        eos_token_ids = eos_token_ids if eos_token_ids is not None else self.config.eos_token_ids
        length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty
        num_return_sequences = (
            num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
        )

        if input_ids is not None:
            batch_size = input_ids.shape[0]  # overriden by the input batch_size
        else:
            batch_size = 1
        if isinstance(eos_token_ids, int):
            eos_token_ids = [eos_token_ids]

        assert isinstance(max_length, int) and max_length > 0, "`max_length` should be a strictly positive integer."
        assert isinstance(do_sample, bool), "`do_sample` should be a boolean."
        assert isinstance(num_beams, int) and num_beams > 0, "`num_beams` should be a strictly positive integer."
        # assert temperature >= 0, "`temperature` should be positive."
        assert isinstance(top_k, int) and top_k >= 0, "`top_k` should be a positive integer."
        assert 0 <= top_p <= 1, "`top_p` should be between 0 and 1."
        assert repetition_penalty >= 1.0, "`repetition_penalty` should be >= 1."
        assert isinstance(bos_token_id, int) and bos_token_id >= 0, "`bos_token_id` should be a positive integer."
        assert isinstance(pad_token_id, int) and pad_token_id >= 0, "`pad_token_id` should be a positive integer."
        assert isinstance(eos_token_ids, (list, tuple)) and all(
            e >= 0 for e in eos_token_ids
        ), "`eos_token_ids` should be a positive integer or a list/tuple of positive integers."
        assert length_penalty > 0, "`length_penalty` should be strictly positive."
        assert (
            isinstance(num_return_sequences, int) and num_return_sequences > 0
        ), "`num_return_sequences` should be a strictly positive integer."

        if input_ids is None:
            input_ids = torch.full(
                (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device
            )
        else:
            assert input_ids.dim() == 2, "Input prompt should be of shape (batch_size, sequence length)."

        # current position and vocab size
        cur_len = input_ids.shape[1]
        vocab_size = self.config.vocab_size

        if num_return_sequences != 1:
            # Expand input to num return sequences
            input_ids = input_ids.unsqueeze(1).expand(batch_size, num_return_sequences, cur_len)
            input_ids = input_ids.contiguous().view(
                batch_size * num_return_sequences, cur_len
            )  # (batch_size * num_return_sequences, cur_len)
            effective_batch_size = batch_size * num_return_sequences
        else:
            effective_batch_size = batch_size

        if num_beams > 1:
            output = self._generate_beam_search(
                input_ids,
                cur_len,
                max_length,
                do_sample,
                temperature,
                top_k,
                top_p,
                repetition_penalty,
                pad_token_id,
                eos_token_ids,
                effective_batch_size,
                length_penalty,
                num_beams,
                vocab_size,
            )
        else:
            output = self._generate_no_beam_search(
                input_ids,
                cur_len,
                max_length,
                do_sample,
                temperature,
                top_k,
                top_p,
                repetition_penalty,
                pad_token_id,
                eos_token_ids,
                effective_batch_size,
            )

        if num_return_sequences != 1:
            output = output.view(batch_size, num_return_sequences, -1)
        return output
    def _generate_no_beam_search(
        self,
        input_ids,
        cur_len,
        max_length,
        do_sample,
        temperature,
        top_k,
        top_p,
        repetition_penalty,
        pad_token_id,
        eos_token_ids,
        batch_size,
    ):
        """ Generate sequences for each example without beam search (num_beams == 1).
            All returned sequences are generated independently.
        """
        # current position / max lengths / length of generated sentences / unfinished sentences
        unfinished_sents = input_ids.new(batch_size).fill_(1)
        # TODO: add cached compute states
        pasts = None

        while cur_len < max_length:
            model_inputs = self.prepare_inputs_for_generation(input_ids, pasts=pasts)
            outputs = self(**model_inputs)
            next_token_logits = outputs[0][:, -1, :]

            # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)
            if repetition_penalty != 1.0:
                for i in range(batch_size):
                    for previous_tokens in set(input_ids[i].tolist()):
                        next_token_logits[i, previous_tokens] /= repetition_penalty

            if do_sample:
                # Temperature (higher temperature => more likely to sample low probability tokens)
                if temperature > 0 and temperature != 1.0:
                    next_token_logits = next_token_logits / temperature
                # Top-p/top-k filtering
                next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)
                # Sample
                next_token = torch.multinomial(F.softmax(next_token_logits, dim=-1), num_samples=1).squeeze(1)
            else:
                # Greedy decoding
                next_token = torch.argmax(next_token_logits, dim=-1)

            # update generations and finished sentences
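            # finished sentences get pad_token_id appended instead of the sampled token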
            tokens_to_add = next_token * unfinished_sents + pad_token_id * (1 - unfinished_sents)
            input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)
            for eos_token_id in eos_token_ids:
                unfinished_sents.mul_(tokens_to_add.ne(eos_token_id).long())
            cur_len = cur_len + 1

            # stop when there is a </s> in each sentence, or if we exceed the maximum length
            if unfinished_sents.max() == 0:
                break

        # add eos_token_ids to unfinished sentences
        if cur_len == max_length:
            input_ids[:, -1].masked_fill_(unfinished_sents.to(dtype=torch.bool), eos_token_ids[0])

        return input_ids

    def _generate_beam_search(
        self,
        input_ids,
        cur_len,
        max_length,
        do_sample,
        temperature,
        top_k,
        top_p,
        repetition_penalty,
        pad_token_id,
        eos_token_ids,
        batch_size,
        length_penalty,
        num_beams,
        vocab_size,
    ):
        """ Generate sequences for each example with beam search.
        """
        # Expand input to num beams
        input_ids = input_ids.unsqueeze(1).expand(batch_size, num_beams, cur_len)
        input_ids = input_ids.contiguous().view(batch_size * num_beams, cur_len)  # (batch_size * num_beams, cur_len)

        # generated hypotheses
        generated_hyps = [
            BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=False) for _ in range(batch_size)
        ]

        # scores for each sentence in the beam
        beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)
        beam_scores[:, 1:] = -1e9
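        # all beams start from the same prompt, so mask all but the first beam at the
        # first step; otherwise top-k selection would pick num_beams identical hypotheses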
        beam_scores = beam_scores.view(-1)  # shape (batch_size * num_beams,)

        # cache compute states
        pasts = None  # self.prepare_pasts()

        # done sentences
        done = [False for _ in range(batch_size)]

        while cur_len < max_length:
            model_inputs = self.prepare_inputs_for_generation(input_ids, pasts=pasts)
            scores = self(**model_inputs)[0]  # (batch_size * num_beams, cur_len, vocab_size)
            scores = scores[:, -1, :]  # (batch_size * num_beams, vocab_size)
            # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)
            if repetition_penalty != 1.0:
                for i in range(batch_size * num_beams):
                    for previous_tokens in set(input_ids[i].tolist()):
                        scores[i, previous_tokens] /= repetition_penalty
            if do_sample:
                # Temperature (higher temperature => more likely to sample low probability tokens)
                if temperature > 0 and temperature != 1.0:
                    scores = scores / temperature
                # Top-p/top-k filtering
                scores = top_k_top_p_filtering(
                    scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2
                )  # (batch_size * num_beams, vocab_size)
                # Sample 2 next words for each beam (so we have some spare tokens and match output of greedy beam search)
                next_words = torch.multinomial(F.softmax(scores, dim=-1), num_samples=2)  # (batch_size * num_beams, 2)
                # Compute next scores
                _scores = F.log_softmax(scores, dim=-1)  # (batch_size * num_beams, vocab_size)
                _scores = torch.gather(_scores, -1, next_words)  # (batch_size * num_beams, 2)
                next_scores = _scores + beam_scores[:, None].expand_as(_scores)  # (batch_size * num_beams, 2)
                # Match shape of greedy beam search
                next_words = next_words.view(batch_size, 2 * num_beams)  # (batch_size, 2 * num_beams)
                next_scores = next_scores.view(batch_size, 2 * num_beams)  # (batch_size, 2 * num_beams)
            else:
                # do greedy beam search
                scores = F.log_softmax(scores, dim=-1)  # (batch_size * num_beams, vocab_size)
                assert scores.size() == (batch_size * num_beams, vocab_size)
                # Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product)
                _scores = scores + beam_scores[:, None].expand_as(scores)  # (batch_size * num_beams, vocab_size)
                # re-organize to group the beams together (we keep the top hypotheses across beams)
                _scores = _scores.view(batch_size, num_beams * vocab_size)  # (batch_size, num_beams * vocab_size)
                next_scores, next_words = torch.topk(_scores, 2 * num_beams, dim=1, largest=True, sorted=True)

            assert next_scores.size() == next_words.size() == (batch_size, 2 * num_beams)

            # next batch beam content
            # list of (batch_size * num_beams) tuple(next hypothesis score, next word, current position in the batch)
            next_batch_beam = []

            # for each sentence
            for batch_ex in range(batch_size):

                # if we are done with this sentence
                done[batch_ex] = done[batch_ex] or generated_hyps[batch_ex].is_done(next_scores[batch_ex].max().item())
                if done[batch_ex]:
                    next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams)  # pad the batch
                    continue

                # next sentence beam content
                next_sent_beam = []

                # next words for this sentence
                for idx, score in zip(next_words[batch_ex], next_scores[batch_ex]):

                    # get beam and word IDs
                    beam_id = idx // vocab_size
                    word_id = idx % vocab_size

                    # end of sentence, or next word
                    if word_id.item() in eos_token_ids or cur_len + 1 == max_length:
                        generated_hyps[batch_ex].add(
                            input_ids[batch_ex * num_beams + beam_id, :cur_len].clone(), score.item()
                        )
                    else:
                        next_sent_beam.append((score, word_id, batch_ex * num_beams + beam_id))

                    # the beam for next step is full
                    if len(next_sent_beam) == num_beams:
                        break

                # update next beam content
                assert len(next_sent_beam) == (0 if cur_len + 1 == max_length else num_beams)
                if len(next_sent_beam) == 0:
                    next_sent_beam = [(0, pad_token_id, 0)] * num_beams  # pad the batch
                next_batch_beam.extend(next_sent_beam)
                assert len(next_batch_beam) == num_beams * (batch_ex + 1)

            # sanity check / prepare next batch
            assert len(next_batch_beam) == batch_size * num_beams
            beam_scores = beam_scores.new([x[0] for x in next_batch_beam])
            beam_words = input_ids.new([x[1] for x in next_batch_beam])
            beam_idx = input_ids.new([x[2] for x in next_batch_beam])

            # re-order batch and internal states
            input_ids = input_ids[beam_idx, :]
            input_ids = torch.cat([input_ids, beam_words.unsqueeze(1)], dim=-1)
            # TODO: Activate cache
            # for k in cache.keys():
            #     if k != 'slen':
            #         cache[k] = (cache[k][0][beam_idx], cache[k][1][beam_idx])

            # update current length
            cur_len = cur_len + 1

            # stop when we are done with each sentence
            if all(done):
                break

        # visualize hypotheses
        # print([len(x) for x in generated_hyps], cur_len)
        # globals().update( locals() );
        # !import code; code.interact(local=vars())
        # for ii in range(batch_size):
        #     for ss, ww in sorted(generated_hyps[ii].hyp, key=lambda x: x[0], reverse=True):
        #         print("%.3f " % ss + " ".join(self.dico[x] for x in ww.tolist()))
        #     print("")

        # select the best hypotheses
        tgt_len = input_ids.new(batch_size)
        best = []

        for i, hypotheses in enumerate(generated_hyps):
            best_hyp = max(hypotheses.hyp, key=lambda x: x[0])[1]
            tgt_len[i] = len(best_hyp) + 1  # +1 for the <EOS> symbol
            best.append(best_hyp)

        # generate target batch
        decoded = input_ids.new(batch_size, tgt_len.max().item()).fill_(pad_token_id)
        for i, hypo in enumerate(best):
            decoded[i, : tgt_len[i] - 1] = hypo
            decoded[i, tgt_len[i] - 1] = eos_token_ids[0]

        return decoded


def top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
    """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
        Args:
            logits: logits distribution shape (batch size, vocabulary size)
            if top_k > 0: keep only top k tokens with highest probability (top-k filtering).
            if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering).
                Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
            Make sure we keep at least min_tokens_to_keep per batch example in the output
        From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
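
        Example (an illustrative sketch)::

            logits = torch.randn(1, 100)  # fake next-token logits over a 100-token vocabulary
            filtered = top_k_top_p_filtering(logits, top_k=10, top_p=0.95)
            next_token = torch.multinomial(F.softmax(filtered, dim=-1), num_samples=1)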
    """
    if top_k > 0:
        top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1))  # Safety check
        # Remove all tokens with a probability less than the last token of the top-k
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value

    if top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)

        # Remove tokens with cumulative probability above the threshold (tokens marked 0 are kept)
        sorted_indices_to_remove = cumulative_probs > top_p
        if min_tokens_to_keep > 1:
            # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below)
            sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
        # Shift the indices to the right to keep also the first token above the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0

        # scatter sorted tensors to original indexing
        indices_to_remove = sorted_indices_to_remove.scatter(dim=1, index=sorted_indices, src=sorted_indices_to_remove)
        logits[indices_to_remove] = filter_value
    return logits
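

# Illustrative sampling sketch, not part of the original file: shows how
# top_k_top_p_filtering is typically combined with multinomial sampling.
# The random logits below are a hypothetical stand-in for real model outputs.
def _demo_top_k_top_p_sampling():
    logits = torch.randn(2, 50)  # (batch size, vocabulary size)
    # clone() first: top_k_top_p_filtering modifies the logits tensor in place
    filtered_logits = top_k_top_p_filtering(logits.clone(), top_k=10, top_p=0.9)
    probs = F.softmax(filtered_logits, dim=-1)  # filtered entries get probability 0
    next_tokens = torch.multinomial(probs, num_samples=1)  # shape (2, 1)
    return next_tokens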


class BeamHypotheses(object):
    def __init__(self, n_hyp, max_length, length_penalty, early_stopping):
        """
        Initialize n-best list of hypotheses.
        """
        self.max_length = max_length - 1  # ignoring bos_token
        self.length_penalty = length_penalty
        self.early_stopping = early_stopping
        self.n_hyp = n_hyp
        self.hyp = []
        self.worst_score = 1e9

    def __len__(self):
        """
        Number of hypotheses in the list.
        """
        return len(self.hyp)

    def add(self, hyp, sum_logprobs):
        """
        Add a new hypothesis to the list.
        """
        score = sum_logprobs / len(hyp) ** self.length_penalty
        if len(self) < self.n_hyp or score > self.worst_score:
            self.hyp.append((score, hyp))
            if len(self) > self.n_hyp:
                sorted_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.hyp)])
                del self.hyp[sorted_scores[0][1]]
                self.worst_score = sorted_scores[1][0]
            else:
                self.worst_score = min(score, self.worst_score)

    def is_done(self, best_sum_logprobs):
        """
        If there are enough hypotheses and that none of the hypotheses being generated
        can become better than the worst one in the heap, then we are done with this sentence.
        """
        if len(self) < self.n_hyp:
            return False
        elif self.early_stopping:
            return True
        else:
            return self.worst_score >= best_sum_logprobs / self.max_length ** self.length_penalty
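

# Minimal usage sketch, not part of the original file: BeamHypotheses keeps the
# n_hyp best finished beams, scored by length-penalized sums of log-probabilities.
def _demo_beam_hypotheses():
    hyps = BeamHypotheses(n_hyp=2, max_length=10, length_penalty=1.0, early_stopping=False)
    hyps.add(torch.tensor([5, 7, 9]), sum_logprobs=-2.0)
    hyps.add(torch.tensor([5, 8]), sum_logprobs=-1.0)
    hyps.add(torch.tensor([5, 7, 2]), sum_logprobs=-6.0)  # scores below the worst kept hypothesis are dropped
    assert len(hyps) == 2
    # is_done is True once no ongoing beam can still beat the worst kept hypothesis
    return hyps.is_done(best_sum_logprobs=-12.0)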


class Conv1D(nn.Module):
    def __init__(self, nf, nx):
        """ Conv1D layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2)
            Basically works like a Linear layer but the weights are transposed
        """
        super(Conv1D, self).__init__()
        self.nf = nf
        w = torch.empty(nx, nf)
        nn.init.normal_(w, std=0.02)
        self.weight = nn.Parameter(w)
        self.bias = nn.Parameter(torch.zeros(nf))

    def forward(self, x):
        size_out = x.size()[:-1] + (self.nf,)
        x = torch.addmm(self.bias, x.view(-1, x.size(-1)), self.weight)
        x = x.view(*size_out)
        return x
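

# Equivalence sketch, not part of the original file: Conv1D(nf, nx) maps the last
# dimension from nx to nf, matching an nn.Linear whose weight is transposed.
def _demo_conv1d_equivalence():
    conv = Conv1D(nf=8, nx=4)
    linear = nn.Linear(4, 8)
    with torch.no_grad():
        linear.weight.copy_(conv.weight.t())  # nn.Linear stores (out, in) = (8, 4)
        linear.bias.copy_(conv.bias)
    x = torch.randn(2, 3, 4)
    assert conv(x).shape == (2, 3, 8)
    assert torch.allclose(conv(x), linear(x), atol=1e-5)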


class PoolerStartLogits(nn.Module):
    """ Compute SQuAD start_logits from sequence hidden states. """

    def __init__(self, config):
        super(PoolerStartLogits, self).__init__()
        self.dense = nn.Linear(config.hidden_size, 1)

    def forward(self, hidden_states, p_mask=None):
        """ Args:
1040
1041
1042
            **p_mask**: (`optional`) ``torch.FloatTensor`` of shape `(batch_size, seq_len)`
                invalid position mask such as query and special symbols (PAD, SEP, CLS)
                1.0 means token should be masked.
thomwolf's avatar
thomwolf committed
1043
        """
thomwolf's avatar
thomwolf committed
1044
1045
1046
        x = self.dense(hidden_states).squeeze(-1)

        if p_mask is not None:
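            # float16 cannot represent -1e30 (it would overflow to -inf), so a large
            # negative value inside the fp16 range is used instead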
            if next(self.parameters()).dtype == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask

        return x
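

# Shape sketch, not part of the original file; the SimpleNamespace below is a
# hypothetical stand-in for a model config, which only needs `hidden_size` here.
def _demo_pooler_start_logits():
    from types import SimpleNamespace

    pooler = PoolerStartLogits(SimpleNamespace(hidden_size=8))
    hidden_states = torch.randn(2, 5, 8)  # (bsz, seq_len, hidden_size)
    p_mask = torch.zeros(2, 5)
    p_mask[:, 0] = 1.0  # e.g. mask out a special-symbol position
    return pooler(hidden_states, p_mask=p_mask)  # shape (2, 5); masked positions get ~ -1e30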


class PoolerEndLogits(nn.Module):
    """ Compute SQuAD end_logits from sequence hidden states and start token hidden state.
    """

    def __init__(self, config):
        super(PoolerEndLogits, self).__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dense_1 = nn.Linear(config.hidden_size, 1)

    def forward(self, hidden_states, start_states=None, start_positions=None, p_mask=None):
        """ Args:
1068
1069
1070
1071
1072
1073
            One of ``start_states``, ``start_positions`` should be not None.
            If both are set, ``start_positions`` overrides ``start_states``.

            **start_states**: ``torch.LongTensor`` of shape identical to hidden_states
                hidden states of the first tokens for the labeled span.
            **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
1074
                position of the first token for the labeled span:
1075
1076
1077
            **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
                Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
                1.0 means token should be masked.
thomwolf's avatar
thomwolf committed
1078
        """
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
        if start_positions is not None:
            slen, hsz = hidden_states.shape[-2:]
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions)  # shape (bsz, 1, hsz)
            start_states = start_states.expand(-1, slen, -1)  # shape (bsz, slen, hsz)

        x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
        x = self.activation(x)
        x = self.LayerNorm(x)
        x = self.dense_1(x).squeeze(-1)

        if p_mask is not None:
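            # float16 cannot represent -1e30 (it would overflow to -inf), so a large
            # negative value inside the fp16 range is used instead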
            if next(self.parameters()).dtype == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask

        return x


class PoolerAnswerClass(nn.Module):
    """ Compute SQuAD 2.0 answer class from classification and start tokens hidden states. """

    def __init__(self, config):
        super(PoolerAnswerClass, self).__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)

    def forward(self, hidden_states, start_states=None, start_positions=None, cls_index=None):
        """
        Args:
            One of ``start_states``, ``start_positions`` should be not None.
            If both are set, ``start_positions`` overrides ``start_states``.

            **start_states**: ``torch.LongTensor`` of shape identical to ``hidden_states``.
                hidden states of the first tokens for the labeled span.
            **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
                position of the first token for the labeled span.
            **cls_index**: torch.LongTensor of shape ``(batch_size,)``
                position of the CLS token. If None, take the last token.

            note(Original repo):
                no dependency on end_feature so that we can obtain one single `cls_logits`
                for each sample
thomwolf's avatar
thomwolf committed
1127
        """
        hsz = hidden_states.shape[-1]
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
        if start_positions is not None:
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions).squeeze(-2)  # shape (bsz, hsz)

        if cls_index is not None:
            cls_index = cls_index[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, hsz)
        else:
            cls_token_state = hidden_states[:, -1, :]  # shape (bsz, hsz)

        x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
        x = self.activation(x)
        x = self.dense_1(x).squeeze(-1)

        return x


class SQuADHead(nn.Module):
    r""" A SQuAD head inspired by XLNet.

    Parameters:
        config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model.

    Inputs:
        **hidden_states**: ``torch.FloatTensor`` of shape ``(batch_size, seq_len, hidden_size)``
            hidden states of sequence tokens
        **start_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
            position of the first token for the labeled span.
        **end_positions**: ``torch.LongTensor`` of shape ``(batch_size,)``
            position of the last token for the labeled span.
        **cls_index**: ``torch.LongTensor`` of shape ``(batch_size,)``
            position of the CLS token. If None, take the last token.
        **is_impossible**: ``torch.LongTensor`` of shape ``(batch_size,)``
            Whether the question has a possible answer in the paragraph or not.
        **p_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, seq_len)``
            Mask of invalid position such as query and special symbols (PAD, SEP, CLS)
            1.0 means token should be masked.

    Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
        **loss**: (`optional`, returned if both ``start_positions`` and ``end_positions`` are provided) ``torch.FloatTensor`` of shape ``(1,)``:
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses.
        **start_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``
            Log probabilities for the top ``config.start_n_top`` start token possibilities (beam-search).
        **start_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``
            Indices for the top ``config.start_n_top`` start token possibilities (beam-search).
        **end_top_log_probs**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **end_top_index**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``
            Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search).
        **cls_logits**: (`optional`, returned if ``start_positions`` or ``end_positions`` is not provided)
            ``torch.FloatTensor`` of shape ``(batch_size,)``
            Log probabilities for the ``is_impossible`` label of the answers.
    """

    def __init__(self, config):
        super(SQuADHead, self).__init__()
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top

        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)

    def forward(
        self, hidden_states, start_positions=None, end_positions=None, cls_index=None, is_impossible=None, p_mask=None
    ):
        outputs = ()

        start_logits = self.start_logits(hidden_states, p_mask=p_mask)

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)

            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)

                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5

            outputs = (total_loss,) + outputs

        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            start_log_probs = F.softmax(start_logits, dim=-1)  # shape (bsz, slen)

            start_top_log_probs, start_top_index = torch.topk(
                start_log_probs, self.start_n_top, dim=-1
            )  # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz)  # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp)  # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1)  # shape (bsz, slen, start_n_top, hsz)

            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
                start_states
            )  # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = F.softmax(end_logits, dim=1)  # shape (bsz, slen, start_n_top)

            end_top_log_probs, end_top_index = torch.topk(
                end_log_probs, self.end_n_top, dim=1
            )  # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)

            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)

            outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) + outputs

        # return start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits
        # or (if labels are provided) (total_loss,)
        return outputs
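

# Inference-path sketch, not part of the original file; the SimpleNamespace is a
# hypothetical stand-in for a config with the four attributes SQuADHead reads.
def _demo_squad_head_inference():
    from types import SimpleNamespace

    config = SimpleNamespace(hidden_size=8, layer_norm_eps=1e-12, start_n_top=2, end_n_top=2)
    head = SQuADHead(config)
    hidden_states = torch.randn(2, 6, 8)  # (bsz, slen, hsz)
    # With no start/end positions, the head takes the beam-search inference branch:
    start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits = head(hidden_states)
    assert start_top_log_probs.shape == (2, 2)  # (bsz, start_n_top)
    assert end_top_log_probs.shape == (2, 4)  # (bsz, start_n_top * end_n_top)
    assert cls_logits.shape == (2,)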


class SequenceSummary(nn.Module):
    r""" Compute a single vector summary of a sequence hidden states according to various possibilities:
thomwolf's avatar
thomwolf committed
1268
1269
1270
1271
1272
        Args of the config class:
            summary_type:
                - 'last' => [default] take the last token hidden state (like XLNet)
                - 'first' => take the first token hidden state (like Bert)
                - 'mean' => take the mean of all tokens hidden states
thomwolf's avatar
thomwolf committed
1273
                - 'cls_index' => supply a Tensor of classification token position (GPT/GPT-2)
thomwolf's avatar
thomwolf committed
1274
1275
                - 'attn' => Not implemented now, use multi-head attention
            summary_use_proj: Add a projection after the vector extraction
1276
            summary_proj_to_labels: If True, the projection outputs to config.num_labels classes (otherwise to hidden_size). Default: False.
1277
            summary_activation: 'tanh' => add a tanh activation to the output, Other => no activation. Default
1278
1279
            summary_first_dropout: Add a dropout before the projection and activation
            summary_last_dropout: Add a dropout after the projection and activation
thomwolf's avatar
thomwolf committed
1280
    """

    def __init__(self, config):
        super(SequenceSummary, self).__init__()

        self.summary_type = config.summary_type if hasattr(config, "summary_type") else "last"
        if self.summary_type == "attn":
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

        self.summary = Identity()
        if hasattr(config, "summary_use_proj") and config.summary_use_proj:
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = nn.Linear(config.hidden_size, num_classes)

        self.activation = Identity()
        if hasattr(config, "summary_activation") and config.summary_activation == "tanh":
            self.activation = nn.Tanh()

        self.first_dropout = Identity()
        if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
            self.first_dropout = nn.Dropout(config.summary_first_dropout)

        self.last_dropout = Identity()
        if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
            self.last_dropout = nn.Dropout(config.summary_last_dropout)

    def forward(self, hidden_states, cls_index=None):
        """ hidden_states: float Tensor in shape [bsz, ..., seq_len, hidden_size], the hidden-states of the last layer.
            cls_index: [optional] position of the classification token if summary_type == 'cls_index',
                shape (bsz,) or more generally (bsz, ...) where ... are optional leading dimensions of hidden_states.
                if summary_type == 'cls_index' and cls_index is None:
                    we take the last token of the sequence as classification token
        """
        if self.summary_type == "last":
            output = hidden_states[:, -1]
        elif self.summary_type == "first":
            output = hidden_states[:, 0]
        elif self.summary_type == "mean":
            output = hidden_states.mean(dim=1)
        elif self.summary_type == "cls_index":
            if cls_index is None:
                cls_index = torch.full_like(hidden_states[..., :1, :], hidden_states.shape[-2] - 1, dtype=torch.long)
            else:
                cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
                cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
            # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
            output = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, XX, hidden_size)
        elif self.summary_type == "attn":
            raise NotImplementedError

        output = self.first_dropout(output)
        output = self.summary(output)
        output = self.activation(output)
        output = self.last_dropout(output)

        return output
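

# Usage sketch, not part of the original file; summary options missing from the
# hypothetical SimpleNamespace config simply fall back to Identity modules.
def _demo_sequence_summary():
    from types import SimpleNamespace

    summary = SequenceSummary(SimpleNamespace(summary_type="mean"))
    hidden_states = torch.randn(2, 5, 16)  # (bsz, seq_len, hidden_size)
    output = summary(hidden_states)
    assert output.shape == (2, 16)  # one summary vector per sequence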


def prune_linear_layer(layer, index, dim=0):
    """ Prune a linear layer (a model parameters) to keep only entries in index.
        Return the pruned layer as a new layer with requires_grad=True.
        Used to remove heads.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if layer.bias is not None:
        if dim == 1:
            b = layer.bias.clone().detach()
        else:
            b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    if layer.bias is not None:
        new_layer.bias.requires_grad = False
        new_layer.bias.copy_(b.contiguous())
        new_layer.bias.requires_grad = True
    return new_layer


def prune_conv1d_layer(layer, index, dim=1):
    """ Prune a Conv1D layer (a model parameters) to keep only entries in index.
        A Conv1D work as a Linear layer (see e.g. BERT) but the weights are transposed.
        Return the pruned layer as a new layer with requires_grad=True.
        Used to remove heads.
    """
    index = index.to(layer.weight.device)
    W = layer.weight.index_select(dim, index).clone().detach()
    if dim == 0:
        b = layer.bias.clone().detach()
    else:
        b = layer.bias[index].clone().detach()
    new_size = list(layer.weight.size())
    new_size[dim] = len(index)
    new_layer = Conv1D(new_size[1], new_size[0]).to(layer.weight.device)
    new_layer.weight.requires_grad = False
    new_layer.weight.copy_(W.contiguous())
    new_layer.weight.requires_grad = True
    new_layer.bias.requires_grad = False
    new_layer.bias.copy_(b.contiguous())
    new_layer.bias.requires_grad = True
    return new_layer


def prune_layer(layer, index, dim=None):
    """ Prune a Conv1D or nn.Linear layer (a model parameters) to keep only entries in index.
        Return the pruned layer as a new layer with requires_grad=True.
        Used to remove heads.
    """
    if isinstance(layer, nn.Linear):
        return prune_linear_layer(layer, index, dim=0 if dim is None else dim)
    elif isinstance(layer, Conv1D):
        return prune_conv1d_layer(layer, index, dim=1 if dim is None else dim)
    else:
        raise ValueError("Can't prune layer of class {}".format(layer.__class__))
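

# Pruning sketch, not part of the original file: prune_layer dispatches on the layer
# type and keeps only the entries listed in `index` along the default pruning dim.
def _demo_prune_layer():
    index = torch.tensor([0, 2, 3])
    linear = nn.Linear(4, 6)
    pruned_linear = prune_layer(linear, index)  # default dim=0: keep 3 of 6 output units
    assert pruned_linear.weight.shape == (3, 4)
    conv = Conv1D(nf=6, nx=4)
    pruned_conv = prune_layer(conv, index)  # default dim=1: keep 3 of 6 output units (nf)
    assert pruned_conv.weight.shape == (4, 3)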