# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import os
import re
import shutil
import tempfile
from contextlib import contextmanager
from dataclasses import dataclass
from functools import partial
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import Tensor, device, nn
from torch.nn import CrossEntropyLoss

from requests import HTTPError

from .activations import get_activation
from .configuration_utils import PretrainedConfig
from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled
from .dynamic_module_utils import custom_object_save
from .generation_utils import GenerationMixin
from .pytorch_utils import (  # noqa: F401
    Conv1D,
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_conv1d_layer,
    prune_layer,
    prune_linear_layer,
)
from .utils import (
    DUMMY_INPUTS,
    FLAX_WEIGHTS_NAME,
    HUGGINGFACE_CO_RESOLVE_ENDPOINT,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    WEIGHTS_INDEX_NAME,
    WEIGHTS_NAME,
    EntryNotFoundError,
    ModelOutput,
    PushToHubMixin,
    RepositoryNotFoundError,
    RevisionNotFoundError,
    cached_path,
    has_file,
    hf_bucket_url,
    is_offline_mode,
    is_remote_url,
    logging,
    replace_return_docstrings,
)
from .utils.versions import require_version_core


logger = logging.get_logger(__name__)


_init_weights = True


@contextmanager
def no_init_weights(_enable=True):
    """
    Context manager to globally disable weight initialization to speed up loading large models.

    TODO(Patrick): Delete safety argument `_enable=True` at next major version.
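
    Example (an illustrative sketch; `SomeModel` stands in for any model class whose `init_weights` honors the
    global `_init_weights` flag):

    ```py
    >>> with no_init_weights():
    ...     model = SomeModel(config)  # modules are constructed, but expensive weight initialization is skipped
    ```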
    """
    global _init_weights
    if _enable:
        _init_weights = False
    try:
        yield
    finally:
        _init_weights = True


try:
    from torch.nn import Identity
except ImportError:
    # Older PyTorch compatibility
    class Identity(nn.Module):
        r"""A placeholder identity operator that is argument-insensitive."""

        def __init__(self, *args, **kwargs):
            super().__init__()

        def forward(self, input):
            return input


def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
    try:
        return next(parameter.parameters()).device
    except StopIteration:
        # For nn.DataParallel compatibility in PyTorch 1.5

        def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
            tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
            return tuples

        gen = parameter._named_members(get_members_fn=find_tensor_attributes)
        first_tuple = next(gen)
        return first_tuple[1].device


def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
    try:
        return next(parameter.parameters()).dtype
    except StopIteration:
        # For nn.DataParallel compatibility in PyTorch 1.5

        def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
            tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
            return tuples

        gen = parameter._named_members(get_members_fn=find_tensor_attributes)
        first_tuple = next(gen)
        return first_tuple[1].dtype


def convert_file_size_to_int(size: Union[int, str]):
    """
    Converts a size expressed as a string with digits and a unit (like `"5MB"`) to an integer (in bytes).

    Args:
        size (`int` or `str`): The size to convert. Will be directly returned if an `int`.

    Example:

    ```py
    >>> convert_file_size_to_int("1MB")
    1048576
    ```
    """
    if isinstance(size, int):
        return size
    if size.upper().endswith("GIB"):
        return int(size[:-3]) * (2**30)
    if size.upper().endswith("MIB"):
        return int(size[:-3]) * (2**20)
    if size.upper().endswith("KIB"):
        return int(size[:-3]) * (2**10)
    if size.upper().endswith("GB"):
        return int(size[:-2]) * (10**9)
    if size.upper().endswith("MB"):
        return int(size[:-2]) * (10**6)
    if size.upper().endswith("KB"):
        return int(size[:-2]) * (10**3)
    raise ValueError("`size` is not in a valid format. Use an integer followed by the unit, e.g., '5GB'.")


def dtype_byte_size(dtype):
    """
    Returns the size (in bytes) occupied by one parameter of type `dtype`.

    Example:

    ```py
    >>> dtype_byte_size(torch.float32)
    4
    ```
    """
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8


def shard_checkpoint(state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB"):
    """
    Splits a model state dictionary into sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
    given size.

    The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no
    optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the
    limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],
    [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].

    <Tip warning={true}>

    If one of the model's weights is bigger than `max_shard_size`, it will end up in its own sub-checkpoint which will
    have a size greater than `max_shard_size`.

    </Tip>

    Args:
        state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save.
        max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
            The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
            (like `"5MB"`).
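
    Example (a minimal sketch; with the default `WEIGHTS_NAME` of `"pytorch_model.bin"`, a state dict that fits
    within `max_shard_size` comes back as a single shard with no index):

    ```py
    >>> state_dict = {"wte.weight": torch.randn(8, 4), "ln.weight": torch.randn(4)}
    >>> shards, index = shard_checkpoint(state_dict, max_shard_size="10GB")
    >>> list(shards.keys())
    ['pytorch_model.bin']
    >>> index is None
    True
    ```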
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    for key, weight in state_dict.items():
        weight_size = weight.numel() * dtype_byte_size(weight.dtype)

        # If this weight would tip the current block over the maximum size, we split.
        if current_block_size + weight_size > max_shard_size:
            sharded_state_dicts.append(current_block)
            current_block = {}
            current_block_size = 0

        current_block[key] = weight
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    sharded_state_dicts.append(current_block)

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {WEIGHTS_NAME: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = WEIGHTS_NAME.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        shards[shard_file] = shard
        for key in shard.keys():
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    return shards, index


def get_checkpoint_shard_files(
    pretrained_model_name_or_path,
    index_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    local_files_only=False,
    use_auth_token=None,
    user_agent=None,
    revision=None,
    mirror=None,
):
    """
    For a given model:

    - download and cache all the shards of a sharded checkpoint if `pretrained_model_name_or_path` is a model ID on the
      Hub
    - returns the list of paths to all the shards, as well as some metadata.

    For the description of each arg, see [`PreTrainedModel.from_pretrained`]. `index_filename` is the full path to the
    index (downloaded and cached if `pretrained_model_name_or_path` is a model ID on the Hub).
    """
    with open(index_filename, "r") as f:
        index = json.load(f)

    shard_filenames = sorted(list(set(index["weight_map"].values())))
    sharded_metadata = index["metadata"]
    sharded_metadata["all_checkpoint_keys"] = list(index["weight_map"].keys())

    # First, let's deal with local folder.
    if os.path.isdir(pretrained_model_name_or_path):
        shard_filenames = [os.path.join(pretrained_model_name_or_path, f) for f in shard_filenames]
        return shard_filenames, sharded_metadata

    # At this stage pretrained_model_name_or_path is a model identifier on the Hub
    cached_filenames = []
    for shard_filename in shard_filenames:
        shard_url = hf_bucket_url(
            pretrained_model_name_or_path, filename=shard_filename, revision=revision, mirror=mirror
        )

        try:
            # Load from URL
            cached_filename = cached_path(
                shard_url,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
            )
        # We have already dealt with RepositoryNotFoundError and RevisionNotFoundError when getting the index, so
        # we don't have to catch them here.
        except EntryNotFoundError:
            raise EnvironmentError(
                f"{pretrained_model_name_or_path} does not appear to have a file named {shard_filename} which is "
                "required according to the checkpoint index."
            )
        except HTTPError:
            raise EnvironmentError(
                f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load {shard_filename}. You should try again "
                "after checking your internet connection."
            )

        cached_filenames.append(cached_filename)

    return cached_filenames, sharded_metadata


def load_state_dict(checkpoint_file: Union[str, os.PathLike]):
    """
    Reads a PyTorch checkpoint file, returning properly formatted errors if they arise.
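
    Example (illustrative; the checkpoint path is hypothetical):

    ```py
    >>> state_dict = load_state_dict("path/to/pytorch_model.bin")
    ```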
    """
    try:
        return torch.load(checkpoint_file, map_location="cpu")
    except Exception as e:
        try:
            with open(checkpoint_file) as f:
                if f.read().startswith("version"):
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please install "
                        "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
                        "you cloned."
                    )
                else:
                    raise ValueError(
                        f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained "
                        "model. Make sure you have saved the model properly."
                    ) from e
        except (UnicodeDecodeError, ValueError):
            raise OSError(
                f"Unable to load weights from pytorch checkpoint file for '{checkpoint_file}'. "
                "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True."
            )


def _load_state_dict_into_model(model_to_load, state_dict, start_prefix):
    # Convert old format to new format if needed from a PyTorch state_dict
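    # e.g. an old checkpoint key "encoder.LayerNorm.gamma" becomes "encoder.LayerNorm.weight" and
    # "encoder.LayerNorm.beta" becomes "encoder.LayerNorm.bias" (illustrative key names)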
    old_keys = []
    new_keys = []
    for key in state_dict.keys():
        new_key = None
        if "gamma" in key:
            new_key = key.replace("gamma", "weight")
        if "beta" in key:
            new_key = key.replace("beta", "bias")
        if new_key:
            old_keys.append(key)
            new_keys.append(new_key)
    for old_key, new_key in zip(old_keys, new_keys):
        state_dict[new_key] = state_dict.pop(old_key)

    # copy state_dict so _load_from_state_dict can modify it
    metadata = getattr(state_dict, "_metadata", None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    error_msgs = []

    # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
    # so we need to apply the function recursively.
    def load(module: nn.Module, prefix=""):
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        args = (state_dict, prefix, local_metadata, True, [], [], error_msgs)
        if is_deepspeed_zero3_enabled():
            import deepspeed

            # because zero3 puts placeholders in model params, this context
            # manager gathers (unpartitions) the params of the current layer, then loads from
            # the state dict and then re-partitions them again
            with deepspeed.zero.GatheredParameters(list(module.parameters(recurse=False)), modifier_rank=0):
                if torch.distributed.get_rank() == 0:
                    module._load_from_state_dict(*args)
        else:
            module._load_from_state_dict(*args)

        for name, child in module._modules.items():
            if child is not None:
                load(child, prefix + name + ".")

    load(model_to_load, prefix=start_prefix)

    return error_msgs


class ModuleUtilsMixin:
    """
    A few utilities for `torch.nn.Modules`, to be used as a mixin.
    """

    @staticmethod
    def _hook_rss_memory_pre_forward(module, *args, **kwargs):
        try:
            import psutil
        except ImportError:
            raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")

        process = psutil.Process(os.getpid())
        mem = process.memory_info()
        module.mem_rss_pre_forward = mem.rss
        return None

    @staticmethod
    def _hook_rss_memory_post_forward(module, *args, **kwargs):
        try:
            import psutil
        except ImportError:
            raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")

        process = psutil.Process(os.getpid())
        mem = process.memory_info()
        module.mem_rss_post_forward = mem.rss
        mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
        module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
        return None

    def add_memory_hooks(self):
        """
        Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.

        Increase in memory consumption is stored in a `mem_rss_diff` attribute for each module and can be reset to zero
        with `model.reset_memory_hooks_state()`.
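
        Example (a usage sketch; `model` is any module using this mixin and `inputs` is a hypothetical dict of
        model inputs):

        ```py
        >>> model.add_memory_hooks()
        >>> outputs = model(**inputs)
        >>> total_increase = sum(m.mem_rss_diff for m in model.modules())
        ```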
        """
        for module in self.modules():
            module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
            module.register_forward_hook(self._hook_rss_memory_post_forward)
        self.reset_memory_hooks_state()

    def reset_memory_hooks_state(self):
        """
        Reset the `mem_rss_diff` attribute of each module (see [`~modeling_utils.ModuleUtilsMixin.add_memory_hooks`]).
        """
        for module in self.modules():
            module.mem_rss_diff = 0
            module.mem_rss_post_forward = 0
            module.mem_rss_pre_forward = 0

    @property
    def device(self) -> device:
        """
        `torch.device`: The device on which the module is (assuming that all the module parameters are on the same
        device).
        """
        return get_parameter_device(self)

    @property
    def dtype(self) -> torch.dtype:
        """
        `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
        """
        return get_parameter_dtype(self)

    def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
        """
        Invert an attention mask (e.g., switches 0. and 1.).

        Args:
            encoder_attention_mask (`torch.Tensor`): An attention mask.

        Returns:
            `torch.Tensor`: The inverted attention mask.
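
        Example (a small sketch; the exact fill value for masked positions depends on `self.dtype`):

        ```py
        >>> mask = torch.tensor([[1, 1, 0]])
        >>> inverted = model.invert_attention_mask(mask)  # shape (1, 1, 1, 3)
        >>> # positions where the mask was 1 become 0.0; positions where it was 0 become a large negative bias
        ```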
        """
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
        # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
        # /transformer/transformer_layers.py#L270
        # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
        # encoder_extended_attention_mask.transpose(-1, -2))
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility

        if self.dtype == torch.float16:
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e4
        elif self.dtype in [torch.bfloat16, torch.float32]:
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -1e9
        else:
            raise ValueError(
                f"{self.dtype} not recognized. `dtype` should be set to either `torch.float32`, `torch.float16` or `torch.bfloat16`"
            )

        return encoder_extended_attention_mask

    def create_extended_attention_mask_for_decoder(self, input_shape, attention_mask, device):
        batch_size, seq_length = input_shape
        seq_ids = torch.arange(seq_length, device=device)
        causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
        # in case past_key_values are used we need to add a prefix ones mask to the causal mask
        # causal and attention masks must have same type with pytorch version < 1.3
        causal_mask = causal_mask.to(attention_mask.dtype)

        if causal_mask.shape[1] < attention_mask.shape[1]:
            prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
            causal_mask = torch.cat(
                [
                    torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype),
                    causal_mask,
                ],
                dim=-1,
            )

        extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
        return extended_attention_mask

    def get_extended_attention_mask(self, attention_mask: Tensor, input_shape: Tuple[int], device: device) -> Tensor:
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.

        Arguments:
            attention_mask (`torch.Tensor`):
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
            input_shape (`Tuple[int]`):
                The shape of the input to the model.
            device (`torch.device`):
                The device of the input to the model.

        Returns:
            `torch.Tensor`: The extended attention mask, with the same dtype as `attention_mask.dtype`.
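
        Example (illustrative for an encoder-only configuration; decoder configurations additionally apply a
        causal mask):

        ```py
        >>> mask = torch.ones(1, 4)
        >>> extended = model.get_extended_attention_mask(mask, mask.shape, mask.device)
        >>> extended.shape
        torch.Size([1, 1, 1, 4])
        ```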
        """
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if self.config.is_decoder:
                extended_attention_mask = self.create_extended_attention_mask_for_decoder(
                    input_shape, attention_mask, device
                )
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
                f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        extended_attention_mask = extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        return extended_attention_mask

    def get_head_mask(
        self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
    ) -> Tensor:
        """
        Prepare the head mask if needed.

        Args:
            head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*):
                The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
            num_hidden_layers (`int`):
                The number of hidden layers in the model.
            is_attention_chunked (`bool`, *optional*, defaults to `False`):
                Whether or not the attention scores are computed by chunks.

        Returns:
            `torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with
            `[None]` for each layer.
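
        Example (a sketch; a 1-D mask is broadcast to every layer):

        ```py
        >>> head_mask = torch.tensor([1.0, 1.0, 0.0, 1.0])  # 4 attention heads
        >>> model.get_head_mask(head_mask, num_hidden_layers=12).shape
        torch.Size([12, 1, 4, 1, 1])
        ```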
        """
        if head_mask is not None:
            head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
            if is_attention_chunked is True:
                head_mask = head_mask.unsqueeze(-1)
        else:
            head_mask = [None] * num_hidden_layers

        return head_mask

    def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
        """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
        if head_mask.dim() == 1:
            head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
        elif head_mask.dim() == 2:
            head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
        assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
        head_mask = head_mask.to(dtype=self.dtype)  # switch to float if needed + fp16 compatibility
        return head_mask

    def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
        """
        Get number of (optionally, trainable or non-embeddings) parameters in the module.

        Args:
            only_trainable (`bool`, *optional*, defaults to `False`):
                Whether or not to return only the number of trainable parameters

            exclude_embeddings (`bool`, *optional*, defaults to `False`):
                Whether or not to return only the number of non-embeddings parameters

        Returns:
            `int`: The number of parameters.
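
        Example (a usage sketch; the count is whatever the concrete model reports):

        ```py
        >>> model.num_parameters(only_trainable=True)  # e.g. roughly 110M for a BERT-base sized model
        ```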
        """

        if exclude_embeddings:
            embedding_param_names = [
                f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding)
            ]
            non_embedding_parameters = [
                parameter for name, parameter in self.named_parameters() if name not in embedding_param_names
            ]
            return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable)
        else:
            return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable)

    def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int:
        """
        Helper function to estimate the total number of tokens from the model inputs.

        Args:
            input_dict (`dict`): The model inputs.

        Returns:
            `int`: The total number of tokens.
        """
        if self.main_input_name in input_dict:
            return input_dict[self.main_input_name].numel()
        else:
            logger.warning(
                "Could not estimate the number of tokens of the input, floating-point operations will not be computed"
            )
            return 0

    def floating_point_ops(
        self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True
    ) -> int:
        """
        Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a
        batch with this transformer model. Default approximation neglects the quadratic dependency on the number of
        tokens (valid if `12 * d_model << sequence_length`) as laid out in [this
        paper](https://arxiv.org/pdf/2001.08361.pdf) section 2.1. Should be overridden for transformers with parameter
        re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.

        Args:
            input_dict (`Dict[str, Union[torch.Tensor, Any]]`):
                The model inputs.
            exclude_embeddings (`bool`, *optional*, defaults to `True`):
                Whether or not to count embedding and softmax operations.

        Returns:
            `int`: The number of floating-point operations.
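
        Example (a back-of-the-envelope sketch; for a hypothetical model with ~124M non-embedding parameters and
        a batch of 2 sequences of 512 tokens, this is roughly `6 * (2 * 512) * 124e6 ≈ 7.6e11` operations):

        ```py
        >>> flops = model.floating_point_ops({"input_ids": torch.ones(2, 512, dtype=torch.long)})
        ```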
        """

        return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)


class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin):
    r"""
    Base class for all models.

    [`PreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading,
    downloading and saving models as well as a few methods common to all models to:

        - resize the input embeddings,
        - prune heads in the self-attention heads.

    Class attributes (overridden by derived classes):

        - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class
          for this model architecture.
        - **load_tf_weights** (`Callable`) -- A python *method* for loading a TensorFlow checkpoint in a PyTorch model,
          taking as arguments:

            - **model** ([`PreTrainedModel`]) -- An instance of the model on which to load the TensorFlow checkpoint.
            - **config** ([`PretrainedConfig`]) -- An instance of the configuration associated to the model.
            - **path** (`str`) -- A path to the TensorFlow checkpoint.

        - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived
          classes of the same architecture adding modules on top of the base model.
        - **is_parallelizable** (`bool`) -- A flag indicating whether this model supports model parallelization.
        - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP
          models, `pixel_values` for vision models and `input_values` for speech models).
    """
    config_class = None
    base_model_prefix = ""
    main_input_name = "input_ids"
    _auto_class = None

    # a list of `re` patterns of `state_dict` keys that should be removed from the list of missing
    # keys we find (keys inside the model but not in the checkpoint) and avoid unnecessary warnings.
    _keys_to_ignore_on_load_missing = None
    # a list of `re` patterns of `state_dict` keys that should be removed from the list of
    # unexpected keys we find (keys inside the checkpoint but not the model) and avoid unnecessary
    # warnings.
    _keys_to_ignore_on_load_unexpected = None
    # a list of `state_dict` keys to ignore when saving the model (useful for keys that aren't
    # trained, but which are either deterministic or tied variables)
    _keys_to_ignore_on_save = None

    is_parallelizable = False
    supports_gradient_checkpointing = False

    @property
    def dummy_inputs(self) -> Dict[str, torch.Tensor]:
        """
        `Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
        """
        return {"input_ids": torch.tensor(DUMMY_INPUTS)}

    @property
    def framework(self) -> str:
        """
        `str`: Identifies that this is a PyTorch model.
        """
        return "pt"

    def __init__(self, config: PretrainedConfig, *inputs, **kwargs):
        super().__init__()
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
                f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
                "`PretrainedConfig`. To create a model from a pretrained model use "
                f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # Save config and origin of the pretrained weights if given in model
        self.config = config
        self.name_or_path = config.name_or_path

    def post_init(self):
        """
        A method executed at the end of each Transformer model initialization, to execute code that needs the model's
        modules properly initialized (such as weight initialization).
        """
        self.init_weights()
        self._backward_compatibility_gradient_checkpointing()

    def _backward_compatibility_gradient_checkpointing(self):
        if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False):
            self.gradient_checkpointing_enable()
            # Remove the attribute now that it has been consumed, so it's not saved in the config.
            delattr(self.config, "gradient_checkpointing")

    @classmethod
    def _from_config(cls, config, **kwargs):
        """
        All context managers that the model should be initialized under go here.

        Args:
            torch_dtype (`torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model under this dtype.
        """
        torch_dtype = kwargs.pop("torch_dtype", None)

        # override default dtype if needed
        dtype_orig = None
        if torch_dtype is not None:
            dtype_orig = cls._set_default_torch_dtype(torch_dtype)

        if is_deepspeed_zero3_enabled():
            import deepspeed

            logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
            # this immediately partitions the model across all gpus, to avoid the overhead in time
            # and memory copying it on CPU or each GPU first
            with deepspeed.zero.Init(config_dict_or_path=deepspeed_config()):
                model = cls(config, **kwargs)
        else:
            model = cls(config, **kwargs)

        # restore default dtype if it was modified
        if dtype_orig is not None:
            torch.set_default_dtype(dtype_orig)

        return model

    @classmethod
    def _set_default_torch_dtype(cls, dtype: torch.dtype) -> torch.dtype:
        """
        Change the default dtype and return the previous one. This is needed when wanting to instantiate the model
        under specific dtype.

        Args:
            dtype (`torch.dtype`):
                A floating-point dtype to set as the default.

        Returns:
            `torch.dtype`: the original `dtype` that can be used to restore `torch.set_default_dtype(dtype)` if it was
            modified. If it wasn't, returns `None`.

        Note `set_default_dtype` currently only works with floating-point types and asserts if, for example,
        `torch.int64` is passed. So if a non-float `dtype` is passed, this function will raise an exception.
        """
        if not dtype.is_floating_point:
            raise ValueError(
                f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype"
            )

        logger.info(f"Instantiating {cls.__name__} model under default dtype {dtype}.")
        dtype_orig = torch.get_default_dtype()
        torch.set_default_dtype(dtype)
        return dtype_orig

    @property
    def base_model(self) -> nn.Module:
        """
        `torch.nn.Module`: The main body of the model.
        """
        return getattr(self, self.base_model_prefix, self)

    def get_input_embeddings(self) -> nn.Module:
        """
        Returns the model's input embeddings.

        Returns:
            `nn.Module`: A torch module mapping vocabulary to hidden states.
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            return base_model.get_input_embeddings()
        else:
            raise NotImplementedError

    def set_input_embeddings(self, value: nn.Module):
        """
        Set model's input embeddings.

        Args:
            value (`nn.Module`): A module mapping vocabulary to hidden states.
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            base_model.set_input_embeddings(value)
        else:
            raise NotImplementedError

    def get_output_embeddings(self) -> nn.Module:
        """
        Returns the model's output embeddings.

        Returns:
            `nn.Module`: A torch module mapping hidden states to vocabulary.
        """
        return None  # Overwrite for models with output embeddings

    def _init_weights(self, module):
        """
        Initialize the weights. This method should be overridden by derived class.
        """
        raise NotImplementedError(f"Make sure `_init_weights` is implemented for {self.__class__}")

    def tie_weights(self):
        """
        Tie the weights between the input embeddings and the output embeddings.

        If the `torchscript` flag is set in the configuration, TorchScript can't handle parameter sharing, so we clone
        the weights instead.
        """
        if getattr(self.config, "tie_word_embeddings", True):
            output_embeddings = self.get_output_embeddings()
            if output_embeddings is not None:
                self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())

        if getattr(self.config, "is_encoder_decoder", False) and getattr(self.config, "tie_encoder_decoder", False):
            if hasattr(self, self.base_model_prefix):
                self = getattr(self, self.base_model_prefix)
            self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)

        for module in self.modules():
            if hasattr(module, "_tie_weights"):
                module._tie_weights()

    @staticmethod
    def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str):
        uninitialized_encoder_weights: List[str] = []
        if decoder.__class__ != encoder.__class__:
            logger.info(
                f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder weights are correctly initialized."
            )

        def tie_encoder_to_decoder_recursively(
            decoder_pointer: nn.Module,
            encoder_pointer: nn.Module,
            module_name: str,
            uninitialized_encoder_weights: List[str],
            depth=0,
        ):
            assert isinstance(decoder_pointer, nn.Module) and isinstance(
                encoder_pointer, nn.Module
            ), f"{decoder_pointer} and {encoder_pointer} have to be of type nn.Module"
            if hasattr(decoder_pointer, "weight"):
                assert hasattr(encoder_pointer, "weight")
                encoder_pointer.weight = decoder_pointer.weight
                if hasattr(decoder_pointer, "bias"):
                    assert hasattr(encoder_pointer, "bias")
                    encoder_pointer.bias = decoder_pointer.bias
                return

            encoder_modules = encoder_pointer._modules
            decoder_modules = decoder_pointer._modules
            if len(decoder_modules) > 0:
                assert (
                    len(encoder_modules) > 0
                ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"

                all_encoder_weights = set([module_name + "/" + sub_name for sub_name in encoder_modules.keys()])
                encoder_layer_pos = 0
                for name, module in decoder_modules.items():
                    if name.isdigit():
                        encoder_name = str(int(name) + encoder_layer_pos)
                        decoder_name = name
                        if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
                            encoder_modules
                        ) != len(decoder_modules):
                            # this can happen if the name corresponds to the position in a list module list of layers
                            # in this case the decoder has added a cross-attention that the encoder does not have
                            # thus skip this step and subtract one layer pos from encoder
                            encoder_layer_pos -= 1
                            continue
                    elif name not in encoder_modules:
                        continue
                    elif depth > 500:
                        raise ValueError(
                            "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is a circular dependency between two or more `nn.Modules` of your model."
                        )
                    else:
                        decoder_name = encoder_name = name
                    tie_encoder_to_decoder_recursively(
                        decoder_modules[decoder_name],
                        encoder_modules[encoder_name],
                        module_name + "/" + name,
                        uninitialized_encoder_weights,
                        depth=depth + 1,
                    )
                    all_encoder_weights.remove(module_name + "/" + encoder_name)

                uninitialized_encoder_weights += list(all_encoder_weights)

        # tie weights recursively
        tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights)
        if len(uninitialized_encoder_weights) > 0:
            logger.warning(
                f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
            )

    def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
        """Tie or clone module weights depending of whether we are using TorchScript or not"""
        if self.config.torchscript:
            output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
        else:
            output_embeddings.weight = input_embeddings.weight

        if getattr(output_embeddings, "bias", None) is not None:
            output_embeddings.bias.data = nn.functional.pad(
                output_embeddings.bias.data,
                (
                    0,
                    output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],
                ),
                "constant",
                0,
            )
        if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
            output_embeddings.out_features = input_embeddings.num_embeddings

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
        """
        Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`.

        Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.

        Arguments:
            new_num_tokens (`int`, *optional*):
                The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
993
                vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just
                returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything.

        Return:
            `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
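
        Example (a typical usage sketch after extending a tokenizer with new tokens; `tokenizer` is assumed to
        exist):

        ```py
        >>> model.resize_token_embeddings(len(tokenizer))
        ```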
        """
        model_embeds = self._resize_token_embeddings(new_num_tokens)
        if new_num_tokens is None:
            return model_embeds

        # Update base model and current model config
        self.config.vocab_size = new_num_tokens
        self.vocab_size = new_num_tokens

        # Tie weights again if needed
        self.tie_weights()

        return model_embeds

    def _resize_token_embeddings(self, new_num_tokens):
        old_embeddings = self.get_input_embeddings()
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.set_input_embeddings(new_embeddings)

        # if word embeddings are not tied, make sure that lm head is resized as well
        if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings:
            old_lm_head = self.get_output_embeddings()
            new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens)
            self.set_output_embeddings(new_lm_head)

        return self.get_input_embeddings()

    def _get_resized_embeddings(
        self, old_embeddings: nn.Embedding, new_num_tokens: Optional[int] = None
    ) -> nn.Embedding:
        """
        Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
        initialized vectors at the end. Reducing the size will remove vectors from the end

        Args:
            old_embeddings (`torch.nn.Embedding`):
                Old embeddings to be resized.
            new_num_tokens (`int`, *optional*):
                New number of tokens in the embedding matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or `None`, just returns a pointer to the input tokens
                `torch.nn.Embedding` module of the model without doing anything.

        Return:
            `torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
            `new_num_tokens` is `None`
        """
        if new_num_tokens is None:
            return old_embeddings

        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None):
                old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
        else:
            old_num_tokens, old_embedding_dim = old_embeddings.weight.size()

        if old_num_tokens == new_num_tokens:
            return old_embeddings

        if not isinstance(old_embeddings, nn.Embedding):
            raise TypeError(
                f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. "
                f"You should either use a different resize function or make sure that `old_embeddings` are an instance of {nn.Embedding}."
            )

        # Build new embeddings
        new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
        new_embeddings.to(self.device, dtype=old_embeddings.weight.dtype)

        # initialize all new embeddings (in particular added tokens)
        self._init_weights(new_embeddings)

        # Copy token embeddings from the previous weights

        # numbers of tokens to copy
        n = min(old_num_tokens, new_num_tokens)
        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=0):
                if torch.distributed.get_rank() == 0:
                    new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
        else:
            new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]

        return new_embeddings

    def _get_resized_lm_head(
        self, old_lm_head: nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False
    ) -> nn.Linear:
        """
        Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized
        vectors at the end. Reducing the size will remove vectors from the end

        Args:
            old_lm_head (`torch.nn.Linear`):
                Old lm head linear layer to be resized.
            new_num_tokens (`int`, *optional*):
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
                vectors from the end. If not provided or `None`, just returns a pointer to the input tokens
                `torch.nn.Linear` module of the model without doing anything.
            transposed (`bool`, *optional*, defaults to `False`):
                Whether `old_lm_head` is transposed or not. If `True` `old_lm_head.size()` is `lm_head_dim,
                vocab_size`, else `vocab_size, lm_head_dim`.

        Return:
            `torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if `new_num_tokens` is
            `None`
        """
        if new_num_tokens is None:
            return old_lm_head

        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None):
                old_num_tokens, old_lm_head_dim = (
                    old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
                )
        else:
            old_num_tokens, old_lm_head_dim = (
                old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
            )

        if old_num_tokens == new_num_tokens:
            return old_lm_head

        if not isinstance(old_lm_head, nn.Linear):
            raise TypeError(
                f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. "
                f"You should either use a different resize function or make sure that `old_lm_head` is an instance of {nn.Linear}."
            )

        # Build new lm head
        new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim)
        has_new_lm_head_bias = old_lm_head.bias is not None
        new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias)
        new_lm_head = new_lm_head.to(self.device, dtype=old_lm_head.weight.dtype)

        # initialize new lm head (in particular added tokens)
        self._init_weights(new_lm_head)

        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)

        # XXX: put the long block of code in a wrapper
        if is_deepspeed_zero3_enabled():
            import deepspeed

            params = [old_lm_head.weight, old_lm_head.bias, new_lm_head.weight, new_lm_head.bias]
            with deepspeed.zero.GatheredParameters(params, modifier_rank=0):
                if torch.distributed.get_rank() == 0:
                    # Copy old lm head weights to new lm head
                    if not transposed:
                        new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[
                            :num_tokens_to_copy, :
                        ]
                    else:
                        new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[
                            :, :num_tokens_to_copy
                        ]

                    # Copy bias weights to new lm head
                    if has_new_lm_head_bias:
                        new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy]
        else:
            # Copy old lm head weights to new lm head
            if not transposed:
                new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :]
            else:
                new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy]

            # Copy bias weights to new lm head
            if has_new_lm_head_bias:
                new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy]

        return new_lm_head

    def resize_position_embeddings(self, new_num_position_embeddings: int):
        raise NotImplementedError(
            f"`resize_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should "
            f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`"
        )

    def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]:
        raise NotImplementedError(
            f"`get_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should "
            f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`"
        )
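    # An illustrative sketch of how a subclass might implement the two hooks above
    # (module paths are hypothetical and vary per architecture):
    #
    #     def get_position_embeddings(self) -> nn.Embedding:
    #         return self.embeddings.position_embeddings
    #
    #     def resize_position_embeddings(self, new_num_position_embeddings: int):
    #         old = self.embeddings.position_embeddings
    #         new = nn.Embedding(new_num_position_embeddings, old.embedding_dim).to(old.weight.device)
    #         num_to_copy = min(old.num_embeddings, new_num_position_embeddings)
    #         new.weight.data[:num_to_copy] = old.weight.data[:num_to_copy]
    #         self.embeddings.position_embeddings = new
    #         self.config.max_position_embeddings = new_num_position_embeddings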

    def init_weights(self):
        """
        If needed, prunes heads and initializes weights (unless weight initialization is globally disabled).
        """
        # Prune heads if needed
        if self.config.pruned_heads:
            self.prune_heads(self.config.pruned_heads)

        if _init_weights:
            # Initialize weights
            self.apply(self._init_weights)

            # Tie weights should be skipped when not initializing all weights
            # since from_pretrained(...) calls tie weights anyway
            self.tie_weights()

    def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
        """
        Prunes heads of the base model.

        Arguments:
            heads_to_prune (`Dict[int, List[int]]`):
                Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads
                to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on
                layer 1 and heads 2 and 3 on layer 2.
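
        Example (a minimal sketch; assumes an already-instantiated BERT-style model):

        ```python
        >>> model.prune_heads({1: [0, 2], 2: [2, 3]})
        ```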
        """
        # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
        for layer, heads in heads_to_prune.items():
            union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
            self.config.pruned_heads[layer] = list(union_heads)  # Unfortunately we have to store it as list for JSON

        self.base_model._prune_heads(heads_to_prune)

    def gradient_checkpointing_enable(self):
        """
        Activates gradient checkpointing for the current model.

        Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
        activations".
        """
        if not self.supports_gradient_checkpointing:
            raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")
        self.apply(partial(self._set_gradient_checkpointing, value=True))

    def gradient_checkpointing_disable(self):
        """
        Deactivates gradient checkpointing for the current model.

        Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
        activations".
        """
        if self.supports_gradient_checkpointing:
            self.apply(partial(self._set_gradient_checkpointing, value=False))

    @property
    def is_gradient_checkpointing(self) -> bool:
        """
        Whether gradient checkpointing is activated for this model or not.

        Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
        activations".
        """
        return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules())
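    # Illustrative usage (assumes a model whose architecture supports gradient checkpointing):
    #
    #     model.gradient_checkpointing_enable()   # trade extra compute for lower activation memory
    #     assert model.is_gradient_checkpointing
    #     model.gradient_checkpointing_disable()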

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        save_config: bool = True,
        state_dict: Optional[dict] = None,
        save_function: Callable = torch.save,
        push_to_hub: bool = False,
        max_shard_size: Union[int, str] = "10GB",
        **kwargs,
    ):
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
        [`~PreTrainedModel.from_pretrained`] class method.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to which to save. Will be created if it doesn't exist.
            save_config (`bool`, *optional*, defaults to `True`):
                Whether or not to save the config of the model. Useful when in distributed training (e.g., on TPUs)
                and you need to call this function on all processes. In this case, set `save_config=True` only on the
                main process to avoid race conditions.
            state_dict (nested dictionary of `torch.Tensor`):
                The state dictionary of the model to save. Will default to `self.state_dict()`, but can be used to only
                save parts of the model or if special precautions need to be taken when recovering the state dictionary
                of a model (like when using model parallelism).
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful on distributed training like TPUs when one
                needs to replace `torch.save` with another method.
            push_to_hub (`bool`, *optional*, defaults to `False`):
                Whether or not to push your model to the Hugging Face model hub after saving it.

                <Tip warning={true}>

                Using `push_to_hub=True` will synchronize the repository you are pushing to with `save_directory`,
                which requires `save_directory` to be a local clone of the repo you are pushing to if it's an existing
                folder. Pass along `temp_dir=True` to use a temporary directory instead.

                </Tip>

            max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
                The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of a size
                lower than this limit. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`).

                <Tip warning={true}>

                If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
                which will be bigger than `max_shard_size`.

                </Tip>

            kwargs:
                Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
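
        Example (a minimal sketch; `"bert-base-uncased"` is just an illustrative checkpoint):

        ```python
        >>> from transformers import BertModel

        >>> model = BertModel.from_pretrained("bert-base-uncased")
        >>> # shard the weights into files of at most ~1GB each
        >>> model.save_pretrained("./my_model", max_shard_size="1GB")
        ```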
        """
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
            repo = self._create_or_get_repo(save_directory, **kwargs)

        os.makedirs(save_directory, exist_ok=True)

        # Only save the model itself if we are using distributed training
        model_to_save = unwrap_model(self)

        # save the string version of dtype to the config, e.g. convert torch.float32 => "float32"
        # we currently don't use this setting automatically, but may start to use with v5
        dtype = get_parameter_dtype(model_to_save)
        model_to_save.config.torch_dtype = str(dtype).split(".")[1]

        # Attach architecture to the config
        model_to_save.config.architectures = [model_to_save.__class__.__name__]

        # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
        # loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=self.config)

        # Save the config
        if save_config:
            model_to_save.config.save_pretrained(save_directory)

        # Save the model
        if state_dict is None:
            state_dict = model_to_save.state_dict()

        # Handle the case where some state_dict keys shouldn't be saved
        if self._keys_to_ignore_on_save is not None:
            for ignore_key in self._keys_to_ignore_on_save:
                if ignore_key in state_dict.keys():
                    del state_dict[ignore_key]

        # Shard the model if it is too big.
        shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size)

        # Clean the folder from a previous save
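        # (files whose name starts with the weights-file stem, e.g. "pytorch_model", are removed)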
        for filename in os.listdir(save_directory):
            full_filename = os.path.join(save_directory, filename)
            if filename.startswith(WEIGHTS_NAME[:-4]) and os.path.isfile(full_filename):
                os.remove(full_filename)

        # Save the model
        for shard_file, shard in shards.items():
            save_function(shard, os.path.join(save_directory, shard_file))

        if index is None:
            logger.info(f"Model weights saved in {os.path.join(save_directory, WEIGHTS_NAME)}")
        else:
            save_index_file = os.path.join(save_directory, WEIGHTS_INDEX_NAME)
            # Save the index as well
            with open(save_index_file, "w", encoding="utf-8") as f:
                content = json.dumps(index, indent=2, sort_keys=True) + "\n"
                f.write(content)
            logger.info(
                f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
                f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
                f"index located at {save_index_file}."
            )

        if push_to_hub:
            url = self._push_to_hub(repo, commit_message=commit_message)
            logger.info(f"Model pushed to the hub in this commit: {url}")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
        r"""
        Instantiate a pretrained pytorch model from a pre-trained model configuration.

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
        the model, you should first set it back in training mode with `model.train()`.

        The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.

        The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                      Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
                      user or organization name, like `dbmdz/bert-base-german-cased`.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
                      this case, `from_tf` should be set to `True` and a configuration object should be provided as
                      `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
                      PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
                    - A path or url to a model folder containing a *flax checkpoint file* in *.msgpack* format (e.g.,
                      `./flax_model/` containing `flax_model.msgpack`). In this case, `from_flax` should be set to
                      `True`.
                    - `None` if you are both providing the configuration and state dictionary (resp. with keyword
                      arguments `config` and `state_dict`).
            model_args (sequence of positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.
            config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*):
                Can be either:

                    - an instance of a class derived from [`PretrainedConfig`],
                    - a string or path valid as input to [`~PretrainedConfig.from_pretrained`].

                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
                be automatically loaded when:

                    - The model is a model provided by the library (loaded with the *model id* string of a pretrained
                      model).
                    - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
                      save directory.
                    - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                      configuration JSON file named *config.json* is found in the directory.
            state_dict (`Dict[str, torch.Tensor]`, *optional*):
                A state dictionary to use instead of a state dictionary loaded from saved weights file.

                This option can be used if you want to create a model from a pretrained configuration but load your own
                weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and
                [`~PreTrainedModel.from_pretrained`] is not a simpler option.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
            from_tf (`bool`, *optional*, defaults to `False`):
                Load the model weights from a TensorFlow checkpoint save file (see docstring of
                `pretrained_model_name_or_path` argument).
            from_flax (`bool`, *optional*, defaults to `False`):
                Load the model weights from a Flax checkpoint save file (see docstring of
                `pretrained_model_name_or_path` argument).
            ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
                Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
                as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
                checkpoint with 3 labels).
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (i.e., do not try to download the model).
            use_auth_token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
                when running `transformers-cli login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            mirror (`str`, *optional*):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
                Please refer to the mirror site for more information.
            _fast_init (`bool`, *optional*, defaults to `True`):
                Whether or not to use fast initialization (skipping the initialization of weights that will be
                overridden by the loaded checkpoint).

                <Tip warning={true}>

                One should only disable *_fast_init* to ensure backwards compatibility with `transformers.__version__ <
                4.6.0` for seeded model initialization. This argument will be removed at the next major version. See
                [pull request 11471](https://github.com/huggingface/transformers/pull/11471) for more information.

                </Tip>

            low_cpu_mem_usage (`bool`, *optional*, defaults to `False`):
                Tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
                This is an experimental feature and subject to change at any moment.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype
                will be automatically derived from the model's weights.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it is loaded) and initialize the model (e.g.,
                `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
                automatically loaded:

                    - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
                      underlying model's `__init__` method (we assume all relevant updates to the configuration have
                      already been done)
                    - If a configuration is not provided, `kwargs` will be first passed to the configuration class
                      initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
                      corresponds to a configuration attribute will be used to override said attribute with the
                      supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
                      will be passed to the underlying model's `__init__` function.

        <Tip>

        Passing `use_auth_token=True` is required when you want to use a private model.

        </Tip>

        <Tip>

        Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to
        use this method in a firewalled environment.

        </Tip>

        Examples:

        ```python
        >>> from transformers import BertConfig, BertModel

        >>> # Download model and configuration from huggingface.co and cache.
        >>> model = BertModel.from_pretrained("bert-base-uncased")
        >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
        >>> model = BertModel.from_pretrained("./test/saved_model/")
        >>> # Update configuration during loading.
        >>> model = BertModel.from_pretrained("bert-base-uncased", output_attentions=True)
        >>> assert model.config.output_attentions == True
        >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
        >>> config = BertConfig.from_json_file("./tf_model/my_tf_model_config.json")
        >>> model = BertModel.from_pretrained("./tf_model/my_tf_checkpoint.ckpt.index", from_tf=True, config=config)
        >>> # Loading from a Flax checkpoint file instead of a PyTorch model (slower)
        >>> model = BertModel.from_pretrained("bert-base-uncased", from_flax=True)
        ```"""
        config = kwargs.pop("config", None)
        state_dict = kwargs.pop("state_dict", None)
        cache_dir = kwargs.pop("cache_dir", None)
        from_tf = kwargs.pop("from_tf", False)
        from_flax = kwargs.pop("from_flax", False)
        ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        output_loading_info = kwargs.pop("output_loading_info", False)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        mirror = kwargs.pop("mirror", None)
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)
        _fast_init = kwargs.pop("_fast_init", True)
        torch_dtype = kwargs.pop("torch_dtype", None)
        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", False)

        from_pt = not (from_tf | from_flax)

        user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                _from_auto=from_auto_class,
                _from_pipeline=from_pipeline,
                **kwargs,
            )
        else:
            model_kwargs = kwargs

        # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the
        # index of the files.
        is_sharded = False
        sharded_metadata = None
        # Load model
        if pretrained_model_name_or_path is not None:
            pretrained_model_name_or_path = str(pretrained_model_name_or_path)
            if os.path.isdir(pretrained_model_name_or_path):
                if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
                    # Load from a TF 1.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
                elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
                    # Load from a TF 2.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
                elif from_flax and os.path.isfile(os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)):
                    # Load from a Flax checkpoint in priority if from_flax
                    archive_file = os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                    # Load from a PyTorch checkpoint
                    archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)):
                    # Load from a sharded PyTorch checkpoint
                    archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_INDEX_NAME)
                    is_sharded = True
                # At this stage we don't have a weight file so we will raise an error.
                elif os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
                ) or os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
                    raise EnvironmentError(
                        f"Error no file named {WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} but "
                        "there is a file for TensorFlow weights. Use `from_tf=True` to load this model from those "
                        "weights."
                    )
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME)):
                    raise EnvironmentError(
                        f"Error no file named {WEIGHTS_NAME} found in directory {pretrained_model_name_or_path} but "
                        "there is a file for Flax weights. Use `from_flax=True` to load this model from those "
                        "weights."
                    )
                else:
                    raise EnvironmentError(
                        f"Error no file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME + '.index'} or "
                        f"{FLAX_WEIGHTS_NAME} found in directory {pretrained_model_name_or_path}."
                    )
            elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
                archive_file = pretrained_model_name_or_path
            elif os.path.isfile(pretrained_model_name_or_path + ".index"):
                if not from_tf:
                    raise ValueError(
                        f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set "
                        "from_tf to True to load from this checkpoint."
                    )
                archive_file = pretrained_model_name_or_path + ".index"
            else:
                # set correct filename
                if from_tf:
                    filename = TF2_WEIGHTS_NAME
                elif from_flax:
                    filename = FLAX_WEIGHTS_NAME
                else:
                    filename = WEIGHTS_NAME

                archive_file = hf_bucket_url(
                    pretrained_model_name_or_path,
                    filename=filename,
                    revision=revision,
                    mirror=mirror,
                )

            try:
                # Load from URL or cache if already cached
                resolved_archive_file = cached_path(
                    archive_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    user_agent=user_agent,
                )

            except RepositoryNotFoundError:
                raise EnvironmentError(
                    f"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier "
                    "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a "
                    "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli "
                    "login` and pass `use_auth_token=True`."
                )
            except RevisionNotFoundError:
                raise EnvironmentError(
                    f"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for "
                    "this model name. Check the model page at "
                    f"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions."
                )
            except EntryNotFoundError:
                if filename == WEIGHTS_NAME:
                    try:
                        # Maybe the checkpoint is sharded, we try to grab the index name in this case.
                        archive_file = hf_bucket_url(
                            pretrained_model_name_or_path,
                            filename=WEIGHTS_INDEX_NAME,
                            revision=revision,
                            mirror=mirror,
                        )
                        resolved_archive_file = cached_path(
                            archive_file,
                            cache_dir=cache_dir,
                            force_download=force_download,
                            proxies=proxies,
                            resume_download=resume_download,
                            local_files_only=local_files_only,
                            use_auth_token=use_auth_token,
                            user_agent=user_agent,
                        )
                        is_sharded = True
                    except EntryNotFoundError:
                        # Otherwise, maybe there is a TF or Flax model file.  We try those to give a helpful error
                        # message.
                        has_file_kwargs = {
                            "revision": revision,
                            "mirror": mirror,
                            "proxies": proxies,
                            "use_auth_token": use_auth_token,
                        }
                        if has_file(pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **has_file_kwargs):
                            raise EnvironmentError(
                                f"{pretrained_model_name_or_path} does not appear to have a file named {WEIGHTS_NAME} but "
                                "there is a file for TensorFlow weights. Use `from_tf=True` to load this model from those "
                                "weights."
                            )
                        elif has_file(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME, **has_file_kwargs):
                            raise EnvironmentError(
                                f"{pretrained_model_name_or_path} does not appear to have a file named {WEIGHTS_NAME} but "
                                "there is a file for Flax weights. Use `from_flax=True` to load this model from those "
                                "weights."
                            )
                        else:
                            raise EnvironmentError(
                                f"{pretrained_model_name_or_path} does not appear to have a file named {WEIGHTS_NAME}, "
                                f"{TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}."
                            )
                else:
                    raise EnvironmentError(
                        f"{pretrained_model_name_or_path} does not appear to have a file named {filename}."
                    )
            except HTTPError as err:
                raise EnvironmentError(
                    f"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n"
                    f"{err}"
                )
            except ValueError:
                raise EnvironmentError(
                    f"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it in the cached "
                    f"files and it looks like {pretrained_model_name_or_path} is not the path to a directory "
                    f"containing a file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or "
                    f"{FLAX_WEIGHTS_NAME}.\n"
                    "Check out your internet connection or see how to run the library in offline mode at "
                    "'https://huggingface.co/docs/transformers/installation#offline-mode'."
                )
            except EnvironmentError:
                raise EnvironmentError(
                    f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from "
                    "'https://huggingface.co/models', make sure you don't have a local directory with the same name. "
                    f"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory "
                    f"containing a file named {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or "
                    f"{FLAX_WEIGHTS_NAME}."
                )

            if resolved_archive_file == archive_file:
                logger.info(f"loading weights file {archive_file}")
            else:
                logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
        else:
            resolved_archive_file = None

        # We'll need to download and cache each checkpoint shard if the checkpoint is sharded.
        if is_sharded:
            # resolved_archive_file becomes a list of files that point to the different checkpoint shards in this case.
            resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(
                pretrained_model_name_or_path,
                resolved_archive_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                revision=revision,
                mirror=mirror,
            )

        # load pt weights early so that we know which dtype to init the model under
        if from_pt:
            if not is_sharded and state_dict is None:
                # Time to load the checkpoint
                state_dict = load_state_dict(resolved_archive_file)
            # set dtype to instantiate the model under:
            # 1. If torch_dtype is not None, we use that dtype
            # 2. If torch_dtype is "auto", we auto-detect dtype from the loaded state_dict, by checking its first
            #    weights entry - we assume all weights are of the same dtype
            # we also may have config.torch_dtype available, but we won't rely on it till v5
            dtype_orig = None
            if torch_dtype is not None:
                if isinstance(torch_dtype, str):
                    if torch_dtype == "auto":
                        if is_sharded and "dtype" in sharded_metadata:
                            torch_dtype = sharded_metadata["dtype"]
                        elif not is_sharded:
                            torch_dtype = next(iter(state_dict.values())).dtype
                        else:
                            one_state_dict = load_state_dict(resolved_archive_file)
                            torch_dtype = next(iter(one_state_dict.values())).dtype
                            del one_state_dict  # free CPU memory
                    else:
                        raise ValueError(
                            f"`torch_dtype` can be either a `torch.dtype` or `auto`, but received {torch_dtype}"
                        )
                dtype_orig = cls._set_default_torch_dtype(torch_dtype)
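                # e.g. both `torch_dtype=torch.float16` and `torch_dtype="auto"` funnel through here,
                # temporarily switching torch's default dtype while the model is instantiated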

            if low_cpu_mem_usage:
                # save the keys
                if is_sharded:
                    loaded_state_dict_keys = sharded_metadata["all_checkpoint_keys"]
                else:
                    loaded_state_dict_keys = [k for k in state_dict.keys()]
                    del state_dict  # free CPU memory - will reload again later

        config.name_or_path = pretrained_model_name_or_path

        # Instantiate model.
        if is_deepspeed_zero3_enabled():
            import deepspeed

            logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
            # this immediately partitions the model across all gpus, to avoid the overhead in time
            # and memory copying it on CPU or each GPU first
            with deepspeed.zero.Init(config_dict_or_path=deepspeed_config()):
                with no_init_weights(_enable=_fast_init):
                    model = cls(config, *model_args, **model_kwargs)
        else:
            with no_init_weights(_enable=_fast_init):
                model = cls(config, *model_args, **model_kwargs)

        if from_pt:
            # restore default dtype
            if dtype_orig is not None:
                torch.set_default_dtype(dtype_orig)

        if from_tf:
            if resolved_archive_file.endswith(".index"):
                # Load from a TensorFlow 1.X checkpoint - provided by original authors
                model = cls.load_tf_weights(model, config, resolved_archive_file[:-6])  # Remove the '.index'
            else:
                # Load from our TensorFlow 2.0 checkpoints
                try:
                    from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model

                    model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)
                except ImportError:
                    logger.error(
                        "Loading a TensorFlow model in PyTorch requires both PyTorch and TensorFlow to be installed. Please see "
                        "https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
                    )
                    raise
        elif from_flax:
            try:
                from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model

                model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file)
            except ImportError:
                logger.error(
                    "Loading a Flax model in PyTorch requires both PyTorch and Flax to be installed. Please see "
                    "https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation instructions."
                )
                raise
        elif from_pt:

            if low_cpu_mem_usage:
                cls._load_pretrained_model_low_mem(model, loaded_state_dict_keys, resolved_archive_file)
            else:
                model, missing_keys, unexpected_keys, mismatched_keys, error_msgs = cls._load_pretrained_model(
                    model,
                    state_dict,
                    resolved_archive_file,
                    pretrained_model_name_or_path,
                    ignore_mismatched_sizes=ignore_mismatched_sizes,
                    sharded_metadata=sharded_metadata,
                    _fast_init=_fast_init,
                )

        # make sure token embedding weights are still tied if needed
        model.tie_weights()

        # Set model in evaluation mode to deactivate Dropout modules by default
        model.eval()

        if output_loading_info:
            loading_info = {
                "missing_keys": missing_keys,
                "unexpected_keys": unexpected_keys,
                "mismatched_keys": mismatched_keys,
                "error_msgs": error_msgs,
            }
            return model, loading_info

        return model

    @classmethod
    def _load_pretrained_model(
        cls,
        model,
        state_dict,
        resolved_archive_file,
        pretrained_model_name_or_path,
        ignore_mismatched_sizes=False,
        sharded_metadata=None,
        _fast_init=True,
    ):
        # Retrieve missing & unexpected_keys
        model_state_dict = model.state_dict()
        expected_keys = list(model_state_dict.keys())
        loaded_keys = list(state_dict.keys()) if state_dict is not None else sharded_metadata["all_checkpoint_keys"]
        prefix = model.base_model_prefix

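        # some older checkpoints use TF-style LayerNorm parameter names ("gamma"/"beta");
        # rename them to the PyTorch equivalents ("weight"/"bias") before comparing keys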
        def _fix_key(key):
            if "beta" in key:
                return key.replace("beta", "bias")
            if "gamma" in key:
                return key.replace("gamma", "weight")
            return key

        loaded_keys = [_fix_key(key) for key in loaded_keys]

        if len(prefix) > 0:
            has_prefix_module = any(s.startswith(prefix) for s in loaded_keys)
            expects_prefix_module = any(s.startswith(prefix) for s in expected_keys)
        else:
            has_prefix_module = False
            expects_prefix_module = False

        # key re-naming operations are never done on the keys
        # that are loaded, but always on the keys of the newly initialized model
        remove_prefix_from_model = not has_prefix_module and expects_prefix_module
        add_prefix_to_model = has_prefix_module and not expects_prefix_module
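        # e.g. loading a bare `BertModel` checkpoint into `BertForSequenceClassification` triggers
        # `remove_prefix_from_model`; the reverse direction triggers `add_prefix_to_model`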

        if remove_prefix_from_model:
            expected_keys_not_prefixed = [s for s in expected_keys if not s.startswith(prefix)]
            expected_keys = [".".join(s.split(".")[1:]) if s.startswith(prefix) else s for s in expected_keys]
        elif add_prefix_to_model:
            expected_keys = [".".join([prefix, s]) for s in expected_keys]

        missing_keys = list(set(expected_keys) - set(loaded_keys))
        unexpected_keys = list(set(loaded_keys) - set(expected_keys))

        # Some models may have keys that are not in the state by design, removing them before needlessly warning
        # the user.
        if cls._keys_to_ignore_on_load_missing is not None:
            for pat in cls._keys_to_ignore_on_load_missing:
                missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

        if cls._keys_to_ignore_on_load_unexpected is not None:
            for pat in cls._keys_to_ignore_on_load_unexpected:
                unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

        if _fast_init:
            # retrieve uninitialized modules and initialize them
            uninitialized_modules = model.retrieve_modules_from_names(
                missing_keys, add_prefix=add_prefix_to_model, remove_prefix=remove_prefix_from_model
            )
            for module in uninitialized_modules:
                model._init_weights(module)

        # Make sure we are able to load base models as well as derived models (with heads)
        start_prefix = ""
        model_to_load = model
        if len(cls.base_model_prefix) > 0 and not hasattr(model, cls.base_model_prefix) and has_prefix_module:
            start_prefix = cls.base_model_prefix + "."
        if len(cls.base_model_prefix) > 0 and hasattr(model, cls.base_model_prefix) and not has_prefix_module:
            model_to_load = getattr(model, cls.base_model_prefix)
            if any(key in expected_keys_not_prefixed for key in loaded_keys):
                raise ValueError(
                    "The state dictionary of the model you are trying to load is corrupted. Are you sure it was "
                    "properly saved?"
                )

        def _find_mismatched_keys(
            state_dict,
            model_state_dict,
            loaded_keys,
            add_prefix_to_model,
            remove_prefix_from_model,
            ignore_mismatched_sizes,
        ):
            mismatched_keys = []
            if ignore_mismatched_sizes:
                for checkpoint_key in loaded_keys:
                    model_key = checkpoint_key
                    if remove_prefix_from_model:
                        # The model key starts with `prefix` but `checkpoint_key` doesn't so we add it.
                        model_key = f"{prefix}.{checkpoint_key}"
                    elif add_prefix_to_model:
                        # The model key doesn't start with `prefix` but `checkpoint_key` does so we remove it.
                        model_key = ".".join(checkpoint_key.split(".")[1:])

                    if (
                        model_key in model_state_dict
                        and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape
                    ):
                        mismatched_keys.append(
                            (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)
                        )
                        del state_dict[checkpoint_key]

            return mismatched_keys

        if state_dict is not None:
            # Whole checkpoint
            mismatched_keys = _find_mismatched_keys(
                state_dict,
                model_state_dict,
                loaded_keys,
                add_prefix_to_model,
                remove_prefix_from_model,
                ignore_mismatched_sizes,
            )
            error_msgs = _load_state_dict_into_model(model_to_load, state_dict, start_prefix)
        else:
            # Sharded checkpoint
            # This should always be a list but, just to be sure.
            if not isinstance(resolved_archive_file, list):
                resolved_archive_file = [resolved_archive_file]

            error_msgs = []
            mismatched_keys = []
            for shard_file in resolved_archive_file:
                state_dict = load_state_dict(shard_file)
                # Mismatched keys contains tuples key/shape1/shape2 of weights in the checkpoint that have a shape not
                # matching the weights in the model.
                mismatched_keys += _find_mismatched_keys(
                    state_dict,
                    model_state_dict,
                    loaded_keys,
                    add_prefix_to_model,
                    remove_prefix_from_model,
                    ignore_mismatched_sizes,
                )
                error_msgs += _load_state_dict_into_model(model_to_load, state_dict, start_prefix)

        if len(error_msgs) > 0:
            error_msg = "\n\t".join(error_msgs)
            raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")

        if len(unexpected_keys) > 0:
            logger.warning(
                f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
                f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
                f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
                f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
                f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
                f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
            )
        else:
            logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
        if len(missing_keys) > 0:
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized: {missing_keys}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        elif len(mismatched_keys) == 0:
            logger.info(
                f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
                f"If your task is similar to the task the model of the checkpoint was trained on, "
                f"you can already use {model.__class__.__name__} for predictions without further training."
            )
        if len(mismatched_keys) > 0:
            mismatched_warning = "\n".join(
                [
                    f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
                    for key, shape1, shape2 in mismatched_keys
                ]
            )
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )

        return model, missing_keys, unexpected_keys, mismatched_keys, error_msgs

    def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False):
        module_keys = {".".join(key.split(".")[:-1]) for key in names}

        # torch.nn.ParameterList is a special case where two parameter keywords
        # are appended to the module name, *e.g.* bert.special_embeddings.0
        module_keys = module_keys.union({".".join(key.split(".")[:-2]) for key in names if key[-1].isdigit()})
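        # e.g. a key "bert.special_embeddings.0" contributes both "bert.special_embeddings"
        # (the ParameterList itself) and "bert" to the module keys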

        retrieved_modules = []
        # retrieve all modules that have at least one missing weight name
        for name, module in self.named_modules():
            if remove_prefix:
                name = ".".join(name.split(".")[1:]) if name.startswith(self.base_model_prefix) else name
            elif add_prefix:
                name = ".".join([self.base_model_prefix, name]) if len(name) > 0 else self.base_model_prefix

            if name in module_keys:
                retrieved_modules.append(module)

        return retrieved_modules

    @staticmethod
    def _load_pretrained_model_low_mem(model, loaded_state_dict_keys, resolved_archive_file):
        """
        This is an experimental function that loads the model using ~1.x model size CPU memory

        Before it gets called we do:

        1. save which state_dict keys we have
        2. drop state_dict before model is created, since the latter takes 1x model size memory

        Here then we continue:

        3. switch to the meta device all params/buffers that are going to be replaced from the loaded state_dict
        4. load state_dict 2nd time
        5. replace the params/buffers from the state_dict

        Currently, it doesn't handle missing_keys, unexpected_keys, mismatched_keys. It can't handle deepspeed.
        """
        require_version_core("torch>=1.9")
        if is_deepspeed_zero3_enabled():
            raise ValueError("low_cpu_mem_usage arg cannot be used with DeepSpeed ZeRO-3")

        # a helper util to find the last sub-module and the param/buffer name
        def find_submodule_and_param_name(model, long_key):
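            # e.g. a hypothetical key "bert.pooler.dense.weight" resolves to the `dense`
            # submodule and the param/buffer name "weight"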
            split_key = long_key.split(".")
            submodule = model
            while len(split_key) > 1:
                if hasattr(submodule, split_key[0]):
                    submodule = getattr(submodule, split_key[0])
                    del split_key[0]
                else:
                    submodule = None
                    break
            return submodule, split_key[0]

        # dematerialize param storage for keys that are going to be replaced by state_dict, by
        # putting those on the meta device
        for k in loaded_state_dict_keys:
            submodule, param_name = find_submodule_and_param_name(model, k)
            if submodule is not None:
                # selectively switch to the meta device only those params/buffers that will
                # be next replaced from state_dict. This is a complex way to do p.to_("meta")
                # since we have no in-place to_ for tensors.
                new_val = getattr(submodule, param_name)
                if isinstance(new_val, torch.nn.Parameter):
                    # isinstance returns False for Params on meta device, so switch after the check
                    new_val = torch.nn.Parameter(new_val.to("meta"))
                else:
                    new_val = new_val.to("meta")
                setattr(submodule, param_name, new_val)

        # only now can we load the state_dict(s)
        if not isinstance(resolved_archive_file, list):
            resolved_archive_file = [resolved_archive_file]

        for archive_file in resolved_archive_file:
            state_dict = torch.load(archive_file, map_location="cpu")

            # materialize state_dict entries one by one on CPU
            for k in loaded_state_dict_keys:
                if k in state_dict:
                    submodule, param_name = find_submodule_and_param_name(model, k)
                    if submodule is not None:
                        param_dtype = getattr(submodule, param_name).dtype
                        new_val = state_dict[k].to(param_dtype)
                        if isinstance(getattr(submodule, param_name), torch.nn.Parameter):
                            new_val = torch.nn.Parameter(new_val)
                        setattr(submodule, param_name, new_val)

            del state_dict

    @classmethod
    def register_for_auto_class(cls, auto_class="AutoModel"):
        """
        Register this class with a given auto class. This should only be used for custom models as the ones in the
        library are already mapped with an auto class.

        <Tip warning={true}>

        This API is experimental and may have some slight breaking changes in the next releases.

        </Tip>

        Args:
            auto_class (`str` or `type`, *optional*, defaults to `"AutoModel"`):
                The auto class to register this new model with.
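
        Example (a hedged sketch; `MyCustomModel` is a hypothetical subclass of `PreTrainedModel`):

        ```python
        MyCustomModel.register_for_auto_class("AutoModel")

        # once pushed to the Hub, the class can then be loaded through the auto API
        # with `trust_remote_code=True`
        ```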
        """
        if not isinstance(auto_class, str):
            auto_class = auto_class.__name__

        import transformers.models.auto as auto_module

        if not hasattr(auto_module, auto_class):
            raise ValueError(f"{auto_class} is not a valid auto class.")

        cls._auto_class = auto_class

    def push_to_hub(
        self,
        repo_path_or_name: Optional[str] = None,
        repo_url: Optional[str] = None,
        use_temp_dir: bool = False,
        commit_message: str = "add model",
        organization: Optional[str] = None,
        private: Optional[bool] = None,
        use_auth_token: Optional[Union[bool, str]] = None,
        max_shard_size: Union[int, str] = "10GB",
        **model_card_kwargs
    ) -> str:
        """
        Upload the model files to the 🤗 Model Hub while synchronizing a local clone of the repo in `repo_path_or_name`.

        Parameters:
            repo_path_or_name (`str`, *optional*):
                Can either be a repository name for your model in the Hub or a path to a local folder (in which case
                the repository will have the name of that local folder). If not specified, will default to the name
                given by `repo_url` and a local directory with that name will be created.
            repo_url (`str`, *optional*):
                Specify this in case you want to push to an existing repository in the hub. If unspecified, a new
                repository will be created in your namespace (unless you specify an `organization`) with `repo_name`.
            use_temp_dir (`bool`, *optional*, defaults to `False`):
                Whether or not to clone the distant repo in a temporary directory or in `repo_path_or_name` inside the
                current working directory. This will slow things down if you are making changes in an existing repo
                since you will need to clone the repo before every push.
            commit_message (`str`, *optional*, defaults to `"add model"`):
                Message to commit while pushing.
            organization (`str`, *optional*):
                Organization in which you want to push your model (you must be a member of this organization).
            private (`bool`, *optional*):
                Whether or not the repository created should be private (requires a paying subscription).
            use_auth_token (`bool` or `str`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
                when running `transformers-cli login` (stored in `~/.huggingface`). Will default to `True` if
                `repo_url` is not specified.
            max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
                The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of a size
                lower than this threshold. If expressed as a string, it needs to be digits followed by a unit (like
                `"5MB"`).

                <Tip warning={true}>

                If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
                which will be bigger than `max_shard_size`.

                </Tip>

        Returns:
            `str`: The URL of the commit of your model in the given repository.

        Examples:

        ```python
        from transformers import AutoModel

        model = AutoModel.from_pretrained("bert-base-cased")

        # Push the model to your namespace with the name "my-finetuned-bert" and have a local clone in the
        # *my-finetuned-bert* folder.
        model.push_to_hub("my-finetuned-bert")

        # Push the model to your namespace with the name "my-finetuned-bert" with no local clone.
        model.push_to_hub("my-finetuned-bert", use_temp_dir=True)

        # Push the model to an organization with the name "my-finetuned-bert" and have a local clone in the
        # *my-finetuned-bert* folder.
        model.push_to_hub("my-finetuned-bert", organization="huggingface")

        # Make a change to an existing repo that has been cloned locally in *my-finetuned-bert*.
        model.push_to_hub("my-finetuned-bert", repo_url="https://huggingface.co/sgugger/my-finetuned-bert")
        ```
        """
        if use_temp_dir:
            # Make sure we use the right `repo_name` for the `repo_url` before replacing it.
            if repo_url is None:
                if use_auth_token is None:
                    use_auth_token = True
                repo_name = Path(repo_path_or_name).name
                repo_url = self._get_repo_url_from_name(
                    repo_name, organization=organization, private=private, use_auth_token=use_auth_token
                )
            repo_path_or_name = tempfile.mkdtemp()

        # Create or clone the repo. If the repo is already cloned, this just retrieves the path to the repo.
        repo = self._create_or_get_repo(
            repo_path_or_name=repo_path_or_name,
            repo_url=repo_url,
            organization=organization,
            private=private,
            use_auth_token=use_auth_token,
        )
        # Save the files in the cloned repo
        self.save_pretrained(repo_path_or_name, max_shard_size=max_shard_size)

        # Commit and push!
        url = self._push_to_hub(repo, commit_message=commit_message)

        # Clean up! Clean up! Everybody everywhere!
        if use_temp_dir:
            shutil.rmtree(repo_path_or_name)

        return url


class PoolerStartLogits(nn.Module):
    """
    Compute SQuAD start logits from sequence hidden states.

    Args:
        config ([`PretrainedConfig`]):
            The config used by the model; it will be used to grab the `hidden_size` of the model.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, 1)

    def forward(
        self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
                token should be masked.

        Returns:
            `torch.FloatTensor`: The start logits for SQuAD.
        """
        x = self.dense(hidden_states).squeeze(-1)

        if p_mask is not None:
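            # fp16 cannot represent -1e30 (its max magnitude is ~65504), so use a large finite constant instead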
            if get_parameter_dtype(self) == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask

        return x


class PoolerEndLogits(nn.Module):
    """
    Compute SQuAD end logits from sequence hidden states.

    Args:
        config ([`PretrainedConfig`]):
            The config used by the model; it will be used to grab the `hidden_size` of the model and the
            `layer_norm_eps` to use.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dense_1 = nn.Linear(config.hidden_size, 1)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_states: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        p_mask: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*):
                The hidden states of the first tokens for the labeled span.
            start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
                The position of the first token for the labeled span.
            p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
                token should be masked.

        <Tip>

        One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides
        `start_states`.

        </Tip>

        Returns:
            `torch.FloatTensor`: The end logits for SQuAD.
        """
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
        if start_positions is not None:
            slen, hsz = hidden_states.shape[-2:]
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions)  # shape (bsz, 1, hsz)
            start_states = start_states.expand(-1, slen, -1)  # shape (bsz, slen, hsz)

        x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
        x = self.activation(x)
        x = self.LayerNorm(x)
        x = self.dense_1(x).squeeze(-1)

        if p_mask is not None:
            if get_parameter_dtype(self) == torch.float16:
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask

        return x


class PoolerAnswerClass(nn.Module):
    """
    Compute SQuAD 2.0 answer class from the classification and start token hidden states.

    Args:
        config ([`PretrainedConfig`]):
            The config used by the model; it will be used to grab the `hidden_size` of the model.
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_states: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        cls_index: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
        """
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):
                The final hidden states of the model.
            start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*):
                The hidden states of the first tokens for the labeled span.
            start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
                The position of the first token for the labeled span.
            cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
                Position of the CLS token for each sentence in the batch. If `None`, takes the last token.

        <Tip>

        One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides
        `start_states`.

        </Tip>

        Returns:
            `torch.FloatTensor`: The SQuAD 2.0 answer class.
        """
        # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample.
        hsz = hidden_states.shape[-1]
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
        if start_positions is not None:
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions).squeeze(-2)  # shape (bsz, hsz)

        if cls_index is not None:
            cls_index = cls_index[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, hsz)
        else:
            cls_token_state = hidden_states[:, -1, :]  # shape (bsz, hsz)

        x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
        x = self.activation(x)
        x = self.dense_1(x).squeeze(-1)

        return x


@dataclass
class SquadHeadOutput(ModelOutput):
    """
    Base class for outputs of question answering models using a [`~modeling_utils.SQuADHead`].

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided):
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification
            losses.
        start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Log probabilities for the top `config.start_n_top` start token possibilities (beam-search).
        start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Indices for the top `config.start_n_top` start token possibilities (beam-search).
        end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities
            (beam-search).
        end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search).
        cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Log probabilities for the `is_impossible` label of the answers.

    """

    loss: Optional[torch.FloatTensor] = None
    start_top_log_probs: Optional[torch.FloatTensor] = None
    start_top_index: Optional[torch.LongTensor] = None
    end_top_log_probs: Optional[torch.FloatTensor] = None
    end_top_index: Optional[torch.LongTensor] = None
    cls_logits: Optional[torch.FloatTensor] = None


class SQuADHead(nn.Module):
    r"""
    A SQuAD head inspired by XLNet.

    Args:
        config ([`PretrainedConfig`]):
            The config used by the model; it will be used to grab the `hidden_size` of the model and the
            `layer_norm_eps` to use.
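
    Example (a hedged sketch; `XLNetConfig` provides the `start_n_top`/`end_n_top` and `layer_norm_eps`
    attributes this head expects):

    ```python
    import torch

    from transformers import XLNetConfig
    from transformers.modeling_utils import SQuADHead

    config = XLNetConfig()
    squad_head = SQuADHead(config)

    hidden_states = torch.randn(2, 10, config.hidden_size)
    outputs = squad_head(hidden_states, return_dict=True)  # no labels, so this takes the beam-search inference path
    ```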
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top

        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)

    @replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig)
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        cls_index: Optional[torch.LongTensor] = None,
        is_impossible: Optional[torch.LongTensor] = None,
        p_mask: Optional[torch.FloatTensor] = None,
        return_dict: bool = False,
    ) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]:
        """
        Args:
            hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):
                Final hidden states of the model on the sequence tokens.
            start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
                Positions of the first token for the labeled span.
            end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
                Positions of the last token for the labeled span.
            cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
                Position of the CLS token for each sentence in the batch. If `None`, takes the last token.
            is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
                Whether the question has a possible answer in the paragraph or not.
            p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):
                Mask for tokens at invalid positions, such as query and special symbols (PAD, SEP, CLS). 1.0 means the
                token should be masked.
            return_dict (`bool`, *optional*, defaults to `False`):
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.

        Returns:
        """
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)

            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)

                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5

            return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,)

        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
            start_log_probs = nn.functional.softmax(start_logits, dim=-1)  # shape (bsz, slen)

            start_top_log_probs, start_top_index = torch.topk(
                start_log_probs, self.start_n_top, dim=-1
            )  # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz)  # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp)  # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1)  # shape (bsz, slen, start_n_top, hsz)

            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
                start_states
            )  # shape (bsz, slen, start_n_top, hsz)
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
            end_log_probs = nn.functional.softmax(end_logits, dim=1)  # shape (bsz, slen, start_n_top)

            end_top_log_probs, end_top_index = torch.topk(
                end_log_probs, self.end_n_top, dim=1
            )  # shape (bsz, end_n_top, start_n_top)
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)

            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)

            if not return_dict:
                return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
            else:
                return SquadHeadOutput(
                    start_top_log_probs=start_top_log_probs,
                    start_top_index=start_top_index,
                    end_top_log_probs=end_top_log_probs,
                    end_top_index=end_top_index,
                    cls_logits=cls_logits,
                )


class SequenceSummary(nn.Module):
    r"""
    Compute a single vector summary of a sequence's hidden states.

    Args:
        config ([`PretrainedConfig`]):
            The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
            config class of your model for the default values it uses):

            - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are:

                - `"last"` -- Take the last token hidden state (like XLNet)
                - `"first"` -- Take the first token hidden state (like Bert)
                - `"mean"` -- Take the mean of all tokens hidden states
                - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
                - `"attn"` -- Not implemented for now; would use multi-head attention

            - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.
            - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes
              (otherwise to `config.hidden_size`).
            - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output,
              another string or `None` will add no activation.
            - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
            - **summary_last_dropout** (`float`) -- Optional dropout probability after the projection and activation.
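
    Example (a hedged sketch; `GPT2Config` exposes the `summary_*` attributes used here):

    ```python
    import torch

    from transformers import GPT2Config
    from transformers.modeling_utils import SequenceSummary

    config = GPT2Config(summary_type="cls_index", summary_use_proj=True, summary_proj_to_labels=False)
    summary = SequenceSummary(config)

    hidden_states = torch.randn(2, 10, config.hidden_size)
    cls_index = torch.tensor([9, 5])  # classification token position for each sample
    pooled = summary(hidden_states, cls_index)  # shape (2, config.hidden_size)
    ```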
    """

    def __init__(self, config: PretrainedConfig):
        super().__init__()

        self.summary_type = getattr(config, "summary_type", "last")
        if self.summary_type == "attn":
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

        self.summary = Identity()
        if hasattr(config, "summary_use_proj") and config.summary_use_proj:
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
                num_classes = config.num_labels
            else:
                num_classes = config.hidden_size
            self.summary = nn.Linear(config.hidden_size, num_classes)

        activation_string = getattr(config, "summary_activation", None)
        self.activation: Callable = get_activation(activation_string) if activation_string else Identity()

        self.first_dropout = Identity()
        if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
            self.first_dropout = nn.Dropout(config.summary_first_dropout)

        self.last_dropout = Identity()
        if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
            self.last_dropout = nn.Dropout(config.summary_last_dropout)

    def forward(
        self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
    ) -> torch.FloatTensor:
        """
        Compute a single vector summary of a sequence's hidden states.

        Args:
            hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`):
                The hidden states of the last layer.
            cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*):
                Used if `summary_type == "cls_index"`. If `None`, the last token of the sequence is taken as the
                classification token.

        Returns:
            `torch.FloatTensor`: The summary of the sequence hidden states.
        """
        if self.summary_type == "last":
            output = hidden_states[:, -1]
        elif self.summary_type == "first":
            output = hidden_states[:, 0]
        elif self.summary_type == "mean":
            output = hidden_states.mean(dim=1)
        elif self.summary_type == "cls_index":
            if cls_index is None:
                cls_index = torch.full_like(
                    hidden_states[..., :1, :],
                    hidden_states.shape[-2] - 1,
                    dtype=torch.long,
                )
            else:
                cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
                cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
            # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
            output = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, XX, hidden_size)
        elif self.summary_type == "attn":
            raise NotImplementedError

        output = self.first_dropout(output)
        output = self.summary(output)
        output = self.activation(output)
        output = self.last_dropout(output)

        return output


def unwrap_model(model: nn.Module) -> nn.Module:
    """
    Recursively unwraps a model from potential containers (as used in distributed training).

    Args:
        model (`torch.nn.Module`): The model to unwrap.
    """
    # since there could be multiple levels of wrapping, unwrap recursively
    if hasattr(model, "module"):
        return unwrap_model(model.module)
    else:
        return model
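

# A quick usage sketch (hedged): containers such as `torch.nn.DataParallel` or
# `DistributedDataParallel` expose the wrapped model under `.module`, so
# `unwrap_model` recovers the original instance:
#
#     wrapped = torch.nn.DataParallel(model)
#     assert unwrap_model(wrapped) is model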