modeling_utils.py 172 KB
Newer Older
1
# coding=utf-8
thomwolf's avatar
thomwolf committed
2
# Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team.
3
4
5
6
7
8
9
10
11
12
13
14
15
# Copyright (c) 2018, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Sylvain Gugger's avatar
Sylvain Gugger committed
16
import collections
17
import gc
Yih-Dar's avatar
Yih-Dar committed
18
import inspect
Sylvain Gugger's avatar
Sylvain Gugger committed
19
import json
20
import os
21
import re
Sylvain Gugger's avatar
Sylvain Gugger committed
22
23
import shutil
import tempfile
24
import warnings
25
from contextlib import contextmanager
26
from dataclasses import dataclass
27
from functools import partial
28
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
29
30

import torch
31
from packaging import version
Sylvain Gugger's avatar
Sylvain Gugger committed
32
from torch import Tensor, nn
33
from torch.nn import CrossEntropyLoss
34

35
from .activations import get_activation
36
from .configuration_utils import PretrainedConfig
37
from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled
38
from .dynamic_module_utils import custom_object_save
39
from .generation import GenerationConfig, GenerationMixin
40
41
42
43
44
45
46
47
from .pytorch_utils import (  # noqa: F401
    Conv1D,
    apply_chunking_to_forward,
    find_pruneable_heads_and_indices,
    prune_conv1d_layer,
    prune_layer,
    prune_linear_layer,
)
48
from .utils import (
Aymeric Augustin's avatar
Aymeric Augustin committed
49
    DUMMY_INPUTS,
50
    FLAX_WEIGHTS_NAME,
51
52
    SAFE_WEIGHTS_INDEX_NAME,
    SAFE_WEIGHTS_NAME,
53
54
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
Sylvain Gugger's avatar
Sylvain Gugger committed
55
    WEIGHTS_INDEX_NAME,
56
    WEIGHTS_NAME,
57
    ContextManagers,
58
    ModelOutput,
Sylvain Gugger's avatar
Sylvain Gugger committed
59
    PushToHubMixin,
60
    cached_file,
61
    copy_func,
62
    download_url,
63
    has_file,
64
    is_accelerate_available,
65
    is_bitsandbytes_available,
66
    is_offline_mode,
67
    is_remote_url,
68
    is_safetensors_available,
69
    is_torch_tpu_available,
70
    logging,
Sylvain Gugger's avatar
Sylvain Gugger committed
71
    replace_return_docstrings,
72
)
73
74
from .utils.hub import convert_file_size_to_int, get_checkpoint_shard_files
from .utils.import_utils import ENV_VARS_TRUE_VALUES, importlib_metadata, is_sagemaker_mp_enabled
75
from .utils.quantization_config import BitsAndBytesConfig
76
from .utils.versions import require_version_core
77

Aymeric Augustin's avatar
Aymeric Augustin committed
78

79
80
81
XLA_USE_BF16 = os.environ.get("XLA_USE_BF16", "0").upper()
XLA_DOWNCAST_BF16 = os.environ.get("XLA_DOWNCAST_BF16", "0").upper()

82
if is_accelerate_available():
83
    from accelerate import __version__ as accelerate_version
84
85
86
87
88
89
90
91
    from accelerate import dispatch_model, infer_auto_device_map, init_empty_weights
    from accelerate.utils import (
        load_offloaded_weights,
        offload_weight,
        save_offload_index,
        set_module_tensor_to_device,
    )

92
93
94
95
96
    if version.parse(accelerate_version) > version.parse("0.11.0"):
        from accelerate.utils import get_balanced_memory
    else:
        get_balanced_memory = None

97
98
99
100
if is_safetensors_available():
    from safetensors import safe_open
    from safetensors.torch import load_file as safe_load_file
    from safetensors.torch import save_file as safe_save_file
101

Lysandre Debut's avatar
Lysandre Debut committed
102
logger = logging.get_logger(__name__)
103

104
105
106
107

_init_weights = True


108
109
110
111
112
113
114
115
116
if is_sagemaker_mp_enabled():
    import smdistributed.modelparallel.torch as smp
    from smdistributed.modelparallel import __version__ as SMP_VERSION

    IS_SAGEMAKER_MP_POST_1_10 = version.parse(SMP_VERSION) >= version.parse("1.10")
else:
    IS_SAGEMAKER_MP_POST_1_10 = False


117
118
119
120
121
122
123
124
@contextmanager
def no_init_weights(_enable=True):
    """
    Context manager to globally disable weight initialization to speed up loading large models.

    TODO(Patrick): Delete safety argument `_enable=True` at next major version. .
    """
    global _init_weights
125
    old_init_weights = _init_weights
126
127
128
129
130
    if _enable:
        _init_weights = False
    try:
        yield
    finally:
131
        _init_weights = old_init_weights
132
133


thomwolf's avatar
thomwolf committed
134
135
136
137
138
try:
    from torch.nn import Identity
except ImportError:
    # Older PyTorch compatibility
    class Identity(nn.Module):
Lysandre's avatar
Lysandre committed
139
        r"""A placeholder identity operator that is argument-insensitive."""
140

thomwolf's avatar
thomwolf committed
141
        def __init__(self, *args, **kwargs):
Julien Chaumond's avatar
Julien Chaumond committed
142
            super().__init__()
thomwolf's avatar
thomwolf committed
143
144
145
146

        def forward(self, input):
            return input

147

Lysandre Debut's avatar
Lysandre Debut committed
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
def get_parameter_device(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
    try:
        return next(parameter.parameters()).device
    except StopIteration:
        # For nn.DataParallel compatibility in PyTorch 1.5

        def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
            tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
            return tuples

        gen = parameter._named_members(get_members_fn=find_tensor_attributes)
        first_tuple = next(gen)
        return first_tuple[1].device


163
164
165
166
def get_first_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
    """
    Returns the first parameter dtype (can be non-floating) or asserts if none were found.
    """
Lysandre Debut's avatar
Lysandre Debut committed
167
168
169
    try:
        return next(parameter.parameters()).dtype
    except StopIteration:
Sylvain Gugger's avatar
Sylvain Gugger committed
170
        # For nn.DataParallel compatibility in PyTorch > 1.5
Lysandre Debut's avatar
Lysandre Debut committed
171
172
173
174
175
176
177
178
179
180

        def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
            tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
            return tuples

        gen = parameter._named_members(get_members_fn=find_tensor_attributes)
        first_tuple = next(gen)
        return first_tuple[1].dtype


181
182
def get_parameter_dtype(parameter: Union[nn.Module, GenerationMixin, "ModuleUtilsMixin"]):
    """
Sylvain Gugger's avatar
Sylvain Gugger committed
183
    Returns the first found floating dtype in parameters if there is one, otherwise returns the last dtype it found.
184
    """
Sylvain Gugger's avatar
Sylvain Gugger committed
185
186
187
188
    last_dtype = None
    for t in parameter.parameters():
        last_dtype = t.dtype
        if t.is_floating_point():
189
190
191
            # Adding fix for https://github.com/pytorch/xla/issues/4152
            # Fixes issue where the model code passes a value that is out of range for XLA_USE_BF16=1
            # and XLA_DOWNCAST_BF16=1 so the conversion would cast it to -inf
192
193
194
195
196
            # NOTE: `is_torch_tpu_available()` is checked last as it induces a graph break in torch dynamo
            if XLA_USE_BF16 in ENV_VARS_TRUE_VALUES and is_torch_tpu_available():
                return torch.bfloat16
            if XLA_DOWNCAST_BF16 in ENV_VARS_TRUE_VALUES and is_torch_tpu_available():
                if t.dtype == torch.float:
197
                    return torch.bfloat16
198
199
                if t.dtype == torch.double:
                    return torch.float32
Sylvain Gugger's avatar
Sylvain Gugger committed
200
            return t.dtype
201

Sylvain Gugger's avatar
Sylvain Gugger committed
202
203
204
    if last_dtype is not None:
        # if no floating dtype was found return whatever the first dtype is
        return last_dtype
205

Sylvain Gugger's avatar
Sylvain Gugger committed
206
207
    else:
        # For nn.DataParallel compatibility in PyTorch > 1.5
208
209
210
211
212
        def find_tensor_attributes(module: nn.Module) -> List[Tuple[str, Tensor]]:
            tuples = [(k, v) for k, v in module.__dict__.items() if torch.is_tensor(v)]
            return tuples

        gen = parameter._named_members(get_members_fn=find_tensor_attributes)
Sylvain Gugger's avatar
Sylvain Gugger committed
213
        last_tuple = None
214
        for tuple in gen:
Sylvain Gugger's avatar
Sylvain Gugger committed
215
            last_tuple = tuple
216
217
            if tuple[1].is_floating_point():
                return tuple[1].dtype
Sylvain Gugger's avatar
Sylvain Gugger committed
218
219
220

        # fallback to the last dtype
        return last_tuple[1].dtype
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235


def get_state_dict_float_dtype(state_dict):
    """
    Returns the first found floating dtype in `state_dict` or asserts if none were found.
    """
    for t in state_dict.values():
        if t.is_floating_point():
            return t.dtype

    raise ValueError("couldn't find any floating point dtypes in state_dict")


def get_state_dict_dtype(state_dict):
    """
Sylvain Gugger's avatar
Sylvain Gugger committed
236
    Returns the first found floating dtype in `state_dict` if there is one, otherwise returns the first dtype.
237
238
239
240
241
242
243
    """
    for t in state_dict.values():
        if t.is_floating_point():
            return t.dtype

    # if no floating dtype was found return whatever the first dtype is
    else:
Sylvain Gugger's avatar
Sylvain Gugger committed
244
        return next(state_dict.values()).dtype
245
246


Sylvain Gugger's avatar
Sylvain Gugger committed
247
248
249
250
251
252
253
254
255
256
257
258
259
def dtype_byte_size(dtype):
    """
    Returns the size (in bytes) occupied by one parameter of type `dtype`.

    Example:

    ```py
    >>> dtype_byte_size(torch.float32)
    4
    ```
    """
    if dtype == torch.bool:
        return 1 / 8
260
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
Sylvain Gugger's avatar
Sylvain Gugger committed
261
262
263
264
265
266
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8


267
268
269
def shard_checkpoint(
    state_dict: Dict[str, torch.Tensor], max_shard_size: Union[int, str] = "10GB", weights_name: str = WEIGHTS_NAME
):
Sylvain Gugger's avatar
Sylvain Gugger committed
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
    """
    Splits a model state dictionary in sub-checkpoints so that the final size of each sub-checkpoint does not exceed a
    given size.

    The sub-checkpoints are determined by iterating through the `state_dict` in the order of its keys, so there is no
    optimization made to make each sub-checkpoint as close as possible to the maximum size passed. For example, if the
    limit is 10GB and we have weights of sizes [6GB, 6GB, 2GB, 6GB, 2GB, 2GB] they will get sharded as [6GB], [6+2GB],
    [6+2+2GB] and not [6+2+2GB], [6+2GB], [6GB].

    <Tip warning={true}>

    If one of the model's weight is bigger that `max_sahrd_size`, it will end up in its own sub-checkpoint which will
    have a size greater than `max_shard_size`.

    </Tip>

    Args:
        state_dict (`Dict[str, torch.Tensor]`): The state dictionary of a model to save.
        max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
            The maximum size of each sub-checkpoint. If expressed as a string, needs to be digits followed by a unit
            (like `"5MB"`).
291
292
        weights_name (`str`, *optional*, defaults to `"pytorch_model.bin"`):
            The name of the model save file.
Sylvain Gugger's avatar
Sylvain Gugger committed
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
    """
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    for key, weight in state_dict.items():
        weight_size = weight.numel() * dtype_byte_size(weight.dtype)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            sharded_state_dicts.append(current_block)
            current_block = {}
            current_block_size = 0

        current_block[key] = weight
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    sharded_state_dicts.append(current_block)

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
319
        return {weights_name: sharded_state_dicts[0]}, None
Sylvain Gugger's avatar
Sylvain Gugger committed
320
321
322
323
324

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
325
326
327
328
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        shard_file = shard_file.replace(
            ".safetensors", f"-{idx + 1:05d}-of-{len(sharded_state_dicts):05d}.safetensors"
        )
Sylvain Gugger's avatar
Sylvain Gugger committed
329
330
331
332
333
334
335
336
337
338
        shards[shard_file] = shard
        for key in shard.keys():
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    return shards, index


339
def load_sharded_checkpoint(model, folder, strict=True, prefer_safe=True):
340
341
342
343
344
345
346
347
348
349
350
351
352
    """
    This is the same as
    [`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict)
    but for a sharded checkpoint.

    This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being
    loaded in the model.

    Args:
        model (`torch.nn.Module`): The model in which to load the checkpoint.
        folder (`str` or `os.PathLike`): A path to a folder containing the sharded checkpoint.
        strict (`bool`, *optional`, defaults to `True`):
            Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint.
353
354
355
        prefer_safe (`bool`, *optional*, defaults to `False`)
            If both safetensors and PyTorch save files are present in checkpoint and `prefer_safe` is True, the
            safetensors files will be loaded. Otherwise, PyTorch files are always loaded when possible.
356
357
358
359
360
361
362
363

    Returns:
        `NamedTuple`: A named tuple with `missing_keys` and `unexpected_keys` fields
            - `missing_keys` is a list of str containing the missing keys
            - `unexpected_keys` is a list of str containing the unexpected keys
    """
    # Load the index
    index_file = os.path.join(folder, WEIGHTS_INDEX_NAME)
364
    safe_index_file = os.path.join(folder, SAFE_WEIGHTS_INDEX_NAME)
365

366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
    index_present = os.path.isfile(index_file)
    safe_index_present = os.path.isfile(safe_index_file)

    if not index_present and not (safe_index_present and is_safetensors_available()):
        filenames = (
            (WEIGHTS_INDEX_NAME, SAFE_WEIGHTS_INDEX_NAME) if is_safetensors_available() else (WEIGHTS_INDEX_NAME,)
        )
        raise ValueError(f"Can't find a checkpoint index ({' or '.join(filenames)}) in {folder}.")

    load_safe = False
    if safe_index_present:
        if prefer_safe:
            if is_safetensors_available():
                load_safe = True  # load safe due to preference
            else:
                logger.warning(
                    f"Cannot load sharded checkpoint at {folder} safely since safetensors is not installed!"
                )
        elif not index_present:
            load_safe = True  # load safe since we have no other choice

    load_index = safe_index_file if load_safe else index_file

    with open(load_index, "r", encoding="utf-8") as f:
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
        index = json.load(f)

    shard_files = list(set(index["weight_map"].values()))

    # If strict=True, error before loading any of the state dicts.
    loaded_keys = index["weight_map"].keys()
    model_keys = model.state_dict().keys()
    missing_keys = [key for key in model_keys if key not in loaded_keys]
    unexpected_keys = [key for key in loaded_keys if key not in model_keys]
    if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0):
        error_message = f"Error(s) in loading state_dict for {model.__class__.__name__}"
        if len(missing_keys) > 0:
            str_missing_keys = ",".join([f'"{k}"' for k in missing_keys])
            error_message += f"\nMissing key(s): {str_missing_keys}."
        if len(unexpected_keys) > 0:
            str_unexpected_keys = ",".join([f'"{k}"' for k in unexpected_keys])
            error_message += f"\nMissing key(s): {str_unexpected_keys}."
        raise RuntimeError(error_message)

409
410
    loader = safe_load_file if load_safe else partial(torch.load, map_location="cpu")

411
    for shard_file in shard_files:
412
        state_dict = loader(os.path.join(folder, shard_file))
413
414
        model.load_state_dict(state_dict, strict=False)

415
        # Make sure memory is freed before we load the next state dict.
416
417
418
419
420
421
422
        del state_dict
        gc.collect()

    # Return the same thing as PyTorch load_state_dict function.
    return torch.nn.modules.module._IncompatibleKeys(missing_keys, unexpected_keys)


Sylvain Gugger's avatar
Sylvain Gugger committed
423
424
425
426
def load_state_dict(checkpoint_file: Union[str, os.PathLike]):
    """
    Reads a PyTorch checkpoint file, returning properly formatted errors if they arise.
    """
427
428
429
430
431
432
433
434
435
436
437
438
439
440
    if checkpoint_file.endswith(".safetensors") and is_safetensors_available():
        # Check format of the archive
        with safe_open(checkpoint_file, framework="pt") as f:
            metadata = f.metadata()
        if metadata.get("format") not in ["pt", "tf", "flax"]:
            raise OSError(
                f"The safetensors archive passed at {checkpoint_file} does not contain the valid metadata. Make sure "
                "you save your model with the `save_pretrained` method."
            )
        elif metadata["format"] != "pt":
            raise NotImplementedError(
                f"Conversion from a {metadata['format']} safetensors archive to PyTorch is not implemented yet."
            )
        return safe_load_file(checkpoint_file)
Sylvain Gugger's avatar
Sylvain Gugger committed
441
442
443
444
445
    try:
        return torch.load(checkpoint_file, map_location="cpu")
    except Exception as e:
        try:
            with open(checkpoint_file) as f:
446
                if f.read(7) == "version":
Sylvain Gugger's avatar
Sylvain Gugger committed
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
                    raise OSError(
                        "You seem to have cloned a repository without having git-lfs installed. Please install "
                        "git-lfs and run `git lfs install` followed by `git lfs pull` in the folder "
                        "you cloned."
                    )
                else:
                    raise ValueError(
                        f"Unable to locate the file {checkpoint_file} which is necessary to load this pretrained "
                        "model. Make sure you have saved the model properly."
                    ) from e
        except (UnicodeDecodeError, ValueError):
            raise OSError(
                f"Unable to load weights from pytorch checkpoint file for '{checkpoint_file}' "
                f"at '{checkpoint_file}'. "
                "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True."
            )


465
466
467
468
469
470
471
472
473
474
475
def set_initialized_submodules(model, state_dict_keys):
    """
    Sets the `_is_hf_initialized` flag in all submodules of a given model when all its weights are in the loaded state
    dict.
    """
    for module_name, module in model.named_modules():
        loaded_keys = [k.replace(f"{module_name}.", "") for k in state_dict_keys if k.startswith(f"{module_name}.")]
        if len(set(module.state_dict().keys()) - set(loaded_keys)) == 0:
            module._is_hf_initialized = True


Sylvain Gugger's avatar
Sylvain Gugger committed
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
def _load_state_dict_into_model(model_to_load, state_dict, start_prefix):
    # Convert old format to new format if needed from a PyTorch state_dict
    old_keys = []
    new_keys = []
    for key in state_dict.keys():
        new_key = None
        if "gamma" in key:
            new_key = key.replace("gamma", "weight")
        if "beta" in key:
            new_key = key.replace("beta", "bias")
        if new_key:
            old_keys.append(key)
            new_keys.append(new_key)
    for old_key, new_key in zip(old_keys, new_keys):
        state_dict[new_key] = state_dict.pop(old_key)

    # copy state_dict so _load_from_state_dict can modify it
    metadata = getattr(state_dict, "_metadata", None)
    state_dict = state_dict.copy()
    if metadata is not None:
        state_dict._metadata = metadata

    error_msgs = []

    # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
    # so we need to apply the function recursively.
502
    def load(module: nn.Module, state_dict, prefix=""):
Sylvain Gugger's avatar
Sylvain Gugger committed
503
504
        local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
        args = (state_dict, prefix, local_metadata, True, [], [], error_msgs)
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
        # Parameters of module and children will start with prefix. We can exit early if there are none in this
        # state_dict
        if len([key for key in state_dict if key.startswith(prefix)]) > 0:
            if is_deepspeed_zero3_enabled():
                import deepspeed

                # In sharded models, each shard has only part of the full state_dict, so only gather
                # parameters that are in the current state_dict.
                named_parameters = dict(module.named_parameters(prefix=prefix[:-1], recurse=False))
                params_to_gather = [named_parameters[k] for k in state_dict.keys() if k in named_parameters]
                if len(params_to_gather) > 0:
                    # because zero3 puts placeholders in model params, this context
                    # manager gathers (unpartitions) the params of the current layer, then loads from
                    # the state dict and then re-partitions them again
                    with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=0):
                        if torch.distributed.get_rank() == 0:
                            module._load_from_state_dict(*args)
            else:
                module._load_from_state_dict(*args)
Sylvain Gugger's avatar
Sylvain Gugger committed
524
525
526

        for name, child in module._modules.items():
            if child is not None:
527
                load(child, state_dict, prefix + name + ".")
Sylvain Gugger's avatar
Sylvain Gugger committed
528

529
530
531
532
    load(model_to_load, state_dict, prefix=start_prefix)
    # Delete `state_dict` so it could be collected by GC earlier. Note that `state_dict` is a copy of the argument, so
    # it's safe to delete it.
    del state_dict
Sylvain Gugger's avatar
Sylvain Gugger committed
533
534
535
536

    return error_msgs


537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
def find_submodule_and_param_name(model, long_key, start_prefix):
    """
    A helper util to find the last sub-module and the param/buffer name. If `start_prefix` is supplied it'll be removed
    from the start of the key
    """

    if len(start_prefix) > 0 and long_key.startswith(start_prefix):
        long_key = ".".join(long_key.split(".")[1:])

    split_key = long_key.split(".")
    submodule = model
    while len(split_key) > 1:
        if hasattr(submodule, split_key[0]):
            submodule = getattr(submodule, split_key[0])
            del split_key[0]
        else:
            submodule = None
            break
    if submodule == model:
        submodule = None
    return submodule, split_key[0]


def _move_model_to_meta(model, loaded_state_dict_keys, start_prefix):
    """
    Moves `loaded_state_dict_keys` in model to meta device which frees up the memory taken by those params.

    `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in
    `bert.pooler.dense.weight`

    """

    # dematerialize param storage for keys that are going to be replaced by state_dict, by
    # putting those on the meta device
    for k in loaded_state_dict_keys:
        submodule, param_name = find_submodule_and_param_name(model, k, start_prefix)
        if submodule is not None:
            # selectively switch to the meta device only those params/buffers that will
            # be next replaced from state_dict. This a complex way to do p.to_("meta")
            # since we have no in-place to_ for tensors.
            new_val = getattr(submodule, param_name)
            if isinstance(new_val, torch.nn.Parameter):
                # isinstance returns False for Params on meta device, so switch after the check
                new_val = torch.nn.Parameter(new_val.to("meta"))
            else:
                new_val = new_val.to("meta")
            setattr(submodule, param_name, new_val)


586
587
588
589
590
591
592
593
594
595
596
597
def _load_state_dict_into_meta_model(
    model,
    state_dict,
    loaded_state_dict_keys,  # left for now but could be removed, see below
    start_prefix,
    expected_keys,
    device_map=None,
    offload_folder=None,
    offload_index=None,
    state_dict_folder=None,
    state_dict_index=None,
    dtype=None,
598
    load_in_8bit=False,
Sylvain Gugger's avatar
Sylvain Gugger committed
599
    is_safetensors=False,
600
    keep_in_fp32_modules=None,
601
):
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
    """
    This is somewhat similar to `_load_state_dict_into_model`, but deals with a model that has some or all of its
    params on a `meta` device. It replaces the model params with the data from the `state_dict`, while moving the
    params back to the normal device, but only for `loaded_state_dict_keys`.

    `start_prefix` is used for models which insert their name into model keys, e.g. `bert` in
    `bert.pooler.dense.weight`

    """

    # XXX: remaining features to implement to be fully compatible with _load_state_dict_into_model
    # - deepspeed zero 3 support
    # - need to copy metadata if any - see _load_state_dict_into_model
    # - handling error_msgs - mimicking the error handling in module._load_from_state_dict()
    # - Is there a situation where some keys aren't in `loaded_state_dict_keys` and in which case
    #   they won't get loaded.

619
620
621
    if load_in_8bit:
        from .utils.bitsandbytes import set_module_8bit_tensor_to_device

622
623
    error_msgs = []

624
625
626
627
628
629
630
631
632
633
634
635
636
    old_keys = []
    new_keys = []
    for key in state_dict.keys():
        new_key = None
        if "gamma" in key:
            new_key = key.replace("gamma", "weight")
        if "beta" in key:
            new_key = key.replace("beta", "bias")
        if new_key:
            old_keys.append(key)
            new_keys.append(new_key)
    for old_key, new_key in zip(old_keys, new_keys):
        state_dict[new_key] = state_dict.pop(old_key)
637

638
639
640
641
642
643
644
645
646
    for param_name, param in state_dict.items():
        # First part of the test is always true as load_state_dict_keys always contains state_dict keys.
        if param_name not in loaded_state_dict_keys or param_name not in expected_keys:
            continue

        if param_name.startswith(start_prefix):
            param_name = param_name[len(start_prefix) :]

        module_name = param_name
647
        set_module_kwargs = {}
648

649
        # We convert floating dtypes to the `dtype` passed. We want to keep the buffers/params
650
651
        # in int/uint/bool and not cast them.
        if dtype is not None and torch.is_floating_point(param):
652
653
654
655
656
657
            if (
                keep_in_fp32_modules is not None
                and any(module_to_keep_in_fp32 in param_name for module_to_keep_in_fp32 in keep_in_fp32_modules)
                and dtype == torch.float16
            ):
                param = param.to(torch.float32)
658
659
660
661
662

                # For backward compatibility with older versions of `accelerate`
                # TODO: @sgugger replace this check with version check at the next `accelerate` release
                if "dtype" in list(inspect.signature(set_module_tensor_to_device).parameters):
                    set_module_kwargs["dtype"] = torch.float32
663
664
            else:
                param = param.to(dtype)
665
666
667
668
669
670
671
672
673
674
675
676

        # For compatibility with PyTorch load_state_dict which converts state dict dtype to existing dtype in model
        if dtype is None:
            old_param = model
            splits = param_name.split(".")
            for split in splits:
                old_param = getattr(old_param, split)
                if old_param is None:
                    break

            if old_param is not None:
                param = param.to(old_param.dtype)
677

678
679
        set_module_kwargs["value"] = param

680
681
682
683
684
685
686
687
688
689
690
691
        if device_map is None:
            param_device = "cpu"
        else:
            # find next higher level module that is defined in device_map:
            # bert.lm_head.weight -> bert.lm_head -> bert -> ''
            while len(module_name) > 0 and module_name not in device_map:
                module_name = ".".join(module_name.split(".")[:-1])
            if module_name == "" and "" not in device_map:
                # TODO: group all errors and raise at the end.
                raise ValueError(f"{param_name} doesn't have any device set.")
            param_device = device_map[module_name]
        if param_device == "disk":
Sylvain Gugger's avatar
Sylvain Gugger committed
692
693
            if not is_safetensors:
                offload_index = offload_weight(param, param_name, offload_folder, offload_index)
694
695
        elif param_device == "cpu" and state_dict_index is not None:
            state_dict_index = offload_weight(param, param_name, state_dict_folder, state_dict_index)
696
        elif not load_in_8bit:
697
698
            # For backward compatibility with older versions of `accelerate`
            set_module_tensor_to_device(model, param_name, param_device, **set_module_kwargs)
699
700
        else:
            set_module_8bit_tensor_to_device(model, param_name, param_device, value=param)
701
702

    return error_msgs, offload_index, state_dict_index
703
704


705
706
707
708
709
710
711
712
713
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name


714
class ModuleUtilsMixin:
Julien Chaumond's avatar
Julien Chaumond committed
715
    """
716
    A few utilities for `torch.nn.Modules`, to be used as a mixin.
Julien Chaumond's avatar
Julien Chaumond committed
717
718
    """

719
720
721
722
    @staticmethod
    def _hook_rss_memory_pre_forward(module, *args, **kwargs):
        try:
            import psutil
Sylvain Gugger's avatar
Sylvain Gugger committed
723
        except ImportError:
724
725
726
727
728
729
730
731
732
733
734
            raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")

        process = psutil.Process(os.getpid())
        mem = process.memory_info()
        module.mem_rss_pre_forward = mem.rss
        return None

    @staticmethod
    def _hook_rss_memory_post_forward(module, *args, **kwargs):
        try:
            import psutil
Sylvain Gugger's avatar
Sylvain Gugger committed
735
        except ImportError:
736
737
738
739
740
741
742
743
744
745
            raise ImportError("You need to install psutil (pip install psutil) to use memory tracing.")

        process = psutil.Process(os.getpid())
        mem = process.memory_info()
        module.mem_rss_post_forward = mem.rss
        mem_rss_diff = module.mem_rss_post_forward - module.mem_rss_pre_forward
        module.mem_rss_diff = mem_rss_diff + (module.mem_rss_diff if hasattr(module, "mem_rss_diff") else 0)
        return None

    def add_memory_hooks(self):
Sylvain Gugger's avatar
Sylvain Gugger committed
746
747
748
        """
        Add a memory hook before and after each sub-module forward pass to record increase in memory consumption.

Sylvain Gugger's avatar
Sylvain Gugger committed
749
750
        Increase in memory consumption is stored in a `mem_rss_diff` attribute for each module and can be reset to zero
        with `model.reset_memory_hooks_state()`.
751
752
753
754
755
756
757
        """
        for module in self.modules():
            module.register_forward_pre_hook(self._hook_rss_memory_pre_forward)
            module.register_forward_hook(self._hook_rss_memory_post_forward)
        self.reset_memory_hooks_state()

    def reset_memory_hooks_state(self):
Sylvain Gugger's avatar
Sylvain Gugger committed
758
        """
Sylvain Gugger's avatar
Sylvain Gugger committed
759
        Reset the `mem_rss_diff` attribute of each module (see [`~modeling_utils.ModuleUtilsMixin.add_memory_hooks`]).
Sylvain Gugger's avatar
Sylvain Gugger committed
760
        """
761
762
763
764
765
        for module in self.modules():
            module.mem_rss_diff = 0
            module.mem_rss_post_forward = 0
            module.mem_rss_pre_forward = 0

766
    @property
Sylvain Gugger's avatar
Sylvain Gugger committed
767
    def device(self) -> torch.device:
768
        """
769
        `torch.device`: The device on which the module is (assuming that all the module parameters are on the same
770
        device).
771
        """
Lysandre Debut's avatar
Lysandre Debut committed
772
        return get_parameter_device(self)
773

774
    @property
775
    def dtype(self) -> torch.dtype:
776
        """
777
        `torch.dtype`: The dtype of the module (assuming that all the module parameters have the same dtype).
778
        """
Lysandre Debut's avatar
Lysandre Debut committed
779
        return get_parameter_dtype(self)
780
781

    def invert_attention_mask(self, encoder_attention_mask: Tensor) -> Tensor:
Sylvain Gugger's avatar
Sylvain Gugger committed
782
783
784
785
        """
        Invert an attention mask (e.g., switches 0. and 1.).

        Args:
786
            encoder_attention_mask (`torch.Tensor`): An attention mask.
Sylvain Gugger's avatar
Sylvain Gugger committed
787
788

        Returns:
789
            `torch.Tensor`: The inverted attention mask.
Sylvain Gugger's avatar
Sylvain Gugger committed
790
        """
791
792
793
794
795
796
797
798
799
800
        if encoder_attention_mask.dim() == 3:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
        if encoder_attention_mask.dim() == 2:
            encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
        # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition
        # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow
        # /transformer/transformer_layers.py#L270
        # encoder_extended_attention_mask = (encoder_extended_attention_mask ==
        # encoder_extended_attention_mask.transpose(-1, -2))
        encoder_extended_attention_mask = encoder_extended_attention_mask.to(dtype=self.dtype)  # fp16 compatibility
Yih-Dar's avatar
Yih-Dar committed
801
        encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * torch.finfo(self.dtype).min
802

803
804
        return encoder_extended_attention_mask

805
    @staticmethod
806
807
808
809
810
811
812
    def create_extended_attention_mask_for_decoder(input_shape, attention_mask, device=None):
        if device is not None:
            warnings.warn(
                "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning
            )
        else:
            device = attention_mask.device
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
        batch_size, seq_length = input_shape
        seq_ids = torch.arange(seq_length, device=device)
        causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None]
        # in case past_key_values are used we need to add a prefix ones mask to the causal mask
        # causal and attention masks must have same type with pytorch version < 1.3
        causal_mask = causal_mask.to(attention_mask.dtype)

        if causal_mask.shape[1] < attention_mask.shape[1]:
            prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
            causal_mask = torch.cat(
                [
                    torch.ones((batch_size, seq_length, prefix_seq_len), device=device, dtype=causal_mask.dtype),
                    causal_mask,
                ],
                axis=-1,
            )

        extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
        return extended_attention_mask

833
    def get_extended_attention_mask(
834
        self, attention_mask: Tensor, input_shape: Tuple[int], device: torch.device = None, dtype: torch.float = None
835
    ) -> Tensor:
Sylvain Gugger's avatar
Sylvain Gugger committed
836
837
        """
        Makes broadcastable attention and causal masks so that future and masked tokens are ignored.
838
839

        Arguments:
840
            attention_mask (`torch.Tensor`):
Sylvain Gugger's avatar
Sylvain Gugger committed
841
                Mask with ones indicating tokens to attend to, zeros for tokens to ignore.
842
            input_shape (`Tuple[int]`):
Sylvain Gugger's avatar
Sylvain Gugger committed
843
                The shape of the input to the model.
844
845

        Returns:
846
            `torch.Tensor` The extended attention mask, with a the same dtype as `attention_mask.dtype`.
847
        """
Yih-Dar's avatar
Yih-Dar committed
848
849
850
        if dtype is None:
            dtype = self.dtype

851
852
853
854
855
856
        if not (attention_mask.dim() == 2 and self.config.is_decoder):
            # show warning only if it won't be shown in `create_extended_attention_mask_for_decoder`
            if device is not None:
                warnings.warn(
                    "The `device` argument is deprecated and will be removed in v5 of Transformers.", FutureWarning
                )
857
858
859
860
861
862
863
864
865
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        if attention_mask.dim() == 3:
            extended_attention_mask = attention_mask[:, None, :, :]
        elif attention_mask.dim() == 2:
            # Provided a padding mask of dimensions [batch_size, seq_length]
            # - if the model is a decoder, apply a causal mask in addition to the padding mask
            # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length]
            if self.config.is_decoder:
866
                extended_attention_mask = ModuleUtilsMixin.create_extended_attention_mask_for_decoder(
867
868
                    input_shape, attention_mask, device
                )
869
870
871
872
            else:
                extended_attention_mask = attention_mask[:, None, None, :]
        else:
            raise ValueError(
873
                f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
874
875
876
877
            )

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
878
        # positions we want to attend and the dtype's smallest value for masked positions.
879
880
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
Yih-Dar's avatar
Yih-Dar committed
881
882
        extended_attention_mask = extended_attention_mask.to(dtype=dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * torch.finfo(dtype).min
883
884
        return extended_attention_mask

Sylvain Gugger's avatar
Sylvain Gugger committed
885
886
887
    def get_head_mask(
        self, head_mask: Optional[Tensor], num_hidden_layers: int, is_attention_chunked: bool = False
    ) -> Tensor:
888
        """
Sylvain Gugger's avatar
Sylvain Gugger committed
889
890
891
        Prepare the head mask if needed.

        Args:
892
            head_mask (`torch.Tensor` with shape `[num_heads]` or `[num_hidden_layers x num_heads]`, *optional*):
Sylvain Gugger's avatar
Sylvain Gugger committed
893
                The mask indicating if we should keep the heads or not (1.0 for keep, 0.0 for discard).
894
            num_hidden_layers (`int`):
Sylvain Gugger's avatar
Sylvain Gugger committed
895
                The number of hidden layers in the model.
896
            is_attention_chunked: (`bool`, *optional*, defaults to `False`):
Sylvain Gugger's avatar
Sylvain Gugger committed
897
898
                Whether or not the attentions scores are computed by chunks or not.

899
        Returns:
Sylvain Gugger's avatar
Sylvain Gugger committed
900
901
            `torch.Tensor` with shape `[num_hidden_layers x batch x num_heads x seq_length x seq_length]` or list with
            `[None]` for each layer.
902
903
904
        """
        if head_mask is not None:
            head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
Patrick von Platen's avatar
Patrick von Platen committed
905
906
            if is_attention_chunked is True:
                head_mask = head_mask.unsqueeze(-1)
907
908
909
910
911
912
913
914
915
916
917
918
919
        else:
            head_mask = [None] * num_hidden_layers

        return head_mask

    def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
        """-> [num_hidden_layers x batch x num_heads x seq_length x seq_length]"""
        if head_mask.dim() == 1:
            head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
            head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
        elif head_mask.dim() == 2:
            head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)  # We can specify head_mask for each layer
        assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
920
        head_mask = head_mask.to(dtype=self.dtype)  # switch to float if need + fp16 compatibility
921
922
        return head_mask

923
924
925
926
927
    def num_parameters(self, only_trainable: bool = False, exclude_embeddings: bool = False) -> int:
        """
        Get number of (optionally, trainable or non-embeddings) parameters in the module.

        Args:
928
            only_trainable (`bool`, *optional*, defaults to `False`):
929
930
                Whether or not to return only the number of trainable parameters

931
            exclude_embeddings (`bool`, *optional*, defaults to `False`):
932
933
934
                Whether or not to return only the number of non-embeddings parameters

        Returns:
935
            `int`: The number of parameters.
936
937
        """

938
939
940
941
942
943
944
945
946
947
        if exclude_embeddings:
            embedding_param_names = [
                f"{name}.weight" for name, module_type in self.named_modules() if isinstance(module_type, nn.Embedding)
            ]
            non_embedding_parameters = [
                parameter for name, parameter in self.named_parameters() if name not in embedding_param_names
            ]
            return sum(p.numel() for p in non_embedding_parameters if p.requires_grad or not only_trainable)
        else:
            return sum(p.numel() for p in self.parameters() if p.requires_grad or not only_trainable)
948
949
950
951
952
953

    def estimate_tokens(self, input_dict: Dict[str, Union[torch.Tensor, Any]]) -> int:
        """
        Helper function to estimate the total number of tokens from the model inputs.

        Args:
954
            inputs (`dict`): The model inputs.
955
956

        Returns:
957
            `int`: The total number of tokens.
958
        """
959
960
        if not hasattr(self, "warnings_issued"):
            self.warnings_issued = {}
961
962
        if self.main_input_name in input_dict:
            return input_dict[self.main_input_name].numel()
963
        elif "estimate_tokens" not in self.warnings_issued:
964
            logger.warning(
965
966
                "Could not estimate the number of tokens of the input, floating-point operations will not be computed"
            )
967
968
            self.warnings_issued["estimate_tokens"] = True
        return 0
969
970
971
972
973
974
975

    def floating_point_ops(
        self, input_dict: Dict[str, Union[torch.Tensor, Any]], exclude_embeddings: bool = True
    ) -> int:
        """
        Get number of (optionally, non-embeddings) floating-point operations for the forward and backward passes of a
        batch with this transformer model. Default approximation neglects the quadratic dependency on the number of
Sylvain Gugger's avatar
Sylvain Gugger committed
976
977
        tokens (valid if `12 * d_model << sequence_length`) as laid out in [this
        paper](https://arxiv.org/pdf/2001.08361.pdf) section 2.1. Should be overridden for transformers with parameter
Sylvain Gugger's avatar
Sylvain Gugger committed
978
        re-use e.g. Albert or Universal Transformers, or if doing long-range modeling with very high sequence lengths.
979
980

        Args:
981
            batch_size (`int`):
982
983
                The batch size for the forward pass.

984
            sequence_length (`int`):
985
986
                The number of tokens in each line of the batch.

987
            exclude_embeddings (`bool`, *optional*, defaults to `True`):
988
989
990
                Whether or not to count embedding and softmax operations.

        Returns:
991
            `int`: The number of floating-point operations.
992
993
994
995
        """

        return 6 * self.estimate_tokens(input_dict) * self.num_parameters(exclude_embeddings=exclude_embeddings)

Julien Chaumond's avatar
Julien Chaumond committed
996

Yih-Dar's avatar
Yih-Dar committed
997
class BackboneMixin:
998
999
1000
1001
1002
1003
1004
1005
1006
1007
    @property
    def out_feature_channels(self):
        # the current backbones will output the number of channels for each stage
        # even if that stage is not in the out_features list.
        return {stage: self.num_features[i] for i, stage in enumerate(self.stage_names)}

    @property
    def channels(self):
        return [self.out_feature_channels[name] for name in self.out_features]

Yih-Dar's avatar
Yih-Dar committed
1008
1009
1010
1011
1012
    def forward_with_filtered_kwargs(self, *args, **kwargs):
        signature = dict(inspect.signature(self.forward).parameters)
        filtered_kwargs = {k: v for k, v in kwargs.items() if k in signature}
        return self(*args, **filtered_kwargs)

1013
1014
1015
1016
1017
1018
1019
1020
1021
    def forward(
        self,
        pixel_values: Tensor,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ):
        raise NotImplementedError("This method should be implemented by the derived class.")

Yih-Dar's avatar
Yih-Dar committed
1022

Sylvain Gugger's avatar
Sylvain Gugger committed
1023
class PreTrainedModel(nn.Module, ModuleUtilsMixin, GenerationMixin, PushToHubMixin):
1024
1025
    r"""
    Base class for all models.
1026

Sylvain Gugger's avatar
Sylvain Gugger committed
1027
1028
    [`PreTrainedModel`] takes care of storing the configuration of the models and handles methods for loading,
    downloading and saving models as well as a few methods common to all models to:
1029

1030
1031
        - resize the input embeddings,
        - prune heads in the self-attention heads.
1032

1033
    Class attributes (overridden by derived classes):
Sylvain Gugger's avatar
Sylvain Gugger committed
1034

Sylvain Gugger's avatar
Sylvain Gugger committed
1035
1036
1037
1038
        - **config_class** ([`PretrainedConfig`]) -- A subclass of [`PretrainedConfig`] to use as configuration class
          for this model architecture.
        - **load_tf_weights** (`Callable`) -- A python *method* for loading a TensorFlow checkpoint in a PyTorch model,
          taking as arguments:
1039

Sylvain Gugger's avatar
Sylvain Gugger committed
1040
1041
            - **model** ([`PreTrainedModel`]) -- An instance of the model on which to load the TensorFlow checkpoint.
            - **config** ([`PreTrainedConfig`]) -- An instance of the configuration associated to the model.
1042
            - **path** (`str`) -- A path to the TensorFlow checkpoint.
1043

Sylvain Gugger's avatar
Sylvain Gugger committed
1044
1045
        - **base_model_prefix** (`str`) -- A string indicating the attribute associated to the base model in derived
          classes of the same architecture adding modules on top of the base model.
1046
        - **is_parallelizable** (`bool`) -- A flag indicating whether this model supports model parallelization.
Sylvain Gugger's avatar
Sylvain Gugger committed
1047
1048
        - **main_input_name** (`str`) -- The name of the principal input to the model (often `input_ids` for NLP
          models, `pixel_values` for vision models and `input_values` for speech models).
1049
    """
1050
    config_class = None
1051
    base_model_prefix = ""
1052
    main_input_name = "input_ids"
1053
    _auto_class = None
1054
    _no_split_modules = None
1055
    _keep_in_fp32_modules = None
1056

1057
1058
    # a list of `re` patterns of `state_dict` keys that should be removed from the list of missing
    # keys we find (keys inside the model but not in the checkpoint) and avoid unnecessary warnings.
1059
    _keys_to_ignore_on_load_missing = None
1060
1061
1062
    # a list of `re` patterns of `state_dict` keys that should be removed from the list of
    # unexpected keys we find (keys inside the checkpoint but not the model) and avoid unnecessary
    # warnings.
1063
    _keys_to_ignore_on_load_unexpected = None
1064
1065
    # a list of `state_dict` keys to ignore when saving the model (useful for keys that aren't
    # trained, but which are either deterministic or tied variables)
1066
    _keys_to_ignore_on_save = None
1067

1068
    is_parallelizable = False
1069
    supports_gradient_checkpointing = False
1070

1071
    @property
1072
    def dummy_inputs(self) -> Dict[str, torch.Tensor]:
1073
        """
1074
        `Dict[str, torch.Tensor]`: Dummy inputs to do a forward pass in the network.
1075
        """
1076
        return {"input_ids": torch.tensor(DUMMY_INPUTS)}
1077

1078
1079
1080
1081
1082
1083
1084
    @property
    def framework(self) -> str:
        """
        :str: Identifies that this is a PyTorch model.
        """
        return "pt"

1085
    def __init__(self, config: PretrainedConfig, *inputs, **kwargs):
Julien Chaumond's avatar
Julien Chaumond committed
1086
        super().__init__()
1087
1088
        if not isinstance(config, PretrainedConfig):
            raise ValueError(
1089
1090
1091
                f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
                "`PretrainedConfig`. To create a model from a pretrained model use "
                f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
1092
            )
1093
        # Save config and origin of the pretrained weights if given in model
1094
        self.config = config
1095
        self.name_or_path = config.name_or_path
1096
        self.warnings_issued = {}
1097
        self.generation_config = GenerationConfig.from_model_config(config) if self.can_generate() else None
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111

    def post_init(self):
        """
        A method executed at the end of each Transformer model initialization, to execute code that needs the model's
        modules properly initialized (such as weight initialization).
        """
        self.init_weights()
        self._backward_compatibility_gradient_checkpointing()

    def _backward_compatibility_gradient_checkpointing(self):
        if self.supports_gradient_checkpointing and getattr(self.config, "gradient_checkpointing", False):
            self.gradient_checkpointing_enable()
            # Remove the attribute now that is has been consumed, so it's no saved in the config.
            delattr(self.config, "gradient_checkpointing")
1112

1113
1114
1115
1116
1117
1118
    @classmethod
    def _from_config(cls, config, **kwargs):
        """
        All context managers that the model should be initialized under go here.

        Args:
1119
1120
            torch_dtype (`torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model under this dtype.
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
        """
        torch_dtype = kwargs.pop("torch_dtype", None)

        # override default dtype if needed
        dtype_orig = None
        if torch_dtype is not None:
            dtype_orig = cls._set_default_torch_dtype(torch_dtype)

        if is_deepspeed_zero3_enabled():
            import deepspeed

            logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
            # this immediately partitions the model across all gpus, to avoid the overhead in time
            # and memory copying it on CPU or each GPU first
1135
            with deepspeed.zero.Init(config_dict_or_path=deepspeed_config()):
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
                model = cls(config, **kwargs)
        else:
            model = cls(config, **kwargs)

        # restore default dtype if it was modified
        if dtype_orig is not None:
            torch.set_default_dtype(dtype_orig)

        return model

    @classmethod
    def _set_default_torch_dtype(cls, dtype: torch.dtype) -> torch.dtype:
        """
        Change the default dtype and return the previous one. This is needed when wanting to instantiate the model
        under specific dtype.

        Args:
1153
            dtype (`torch.dtype`):
1154
1155
1156
                a floating dtype to set to.

        Returns:
Sylvain Gugger's avatar
Sylvain Gugger committed
1157
1158
            `torch.dtype`: the original `dtype` that can be used to restore `torch.set_default_dtype(dtype)` if it was
            modified. If it wasn't, returns `None`.
1159

1160
1161
        Note `set_default_dtype` currently only works with floating-point types and asserts if for example,
        `torch.int64` is passed. So if a non-float `dtype` is passed this functions will throw an exception.
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
        """
        if not dtype.is_floating_point:
            raise ValueError(
                f"Can't instantiate {cls.__name__} model under dtype={dtype} since it is not a floating point dtype"
            )

        logger.info(f"Instantiating {cls.__name__} model under default dtype {dtype}.")
        dtype_orig = torch.get_default_dtype()
        torch.set_default_dtype(dtype)
        return dtype_orig

1173
    @property
1174
1175
    def base_model(self) -> nn.Module:
        """
1176
        `torch.nn.Module`: The main body of the model.
1177
        """
1178
        return getattr(self, self.base_model_prefix, self)
thomwolf's avatar
thomwolf committed
1179

1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
    def can_generate(self) -> bool:
        """
        Returns whether this model can generate sequences with `.generate()`.

        Returns:
            `bool`: Whether this model can generate sequences with `.generate()`.
        """
        # Detects whether `prepare_inputs_for_generation` has been overwritten, which is a requirement for generation
        if "GenerationMixin" in str(self.prepare_inputs_for_generation):
            return False
        return True

1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
    def enable_input_require_grads(self):
        """
        Enables the gradients for the input embeddings. This is useful for fine-tuning adapter weights while keeping
        the model weights fixed.
        """

        def make_inputs_require_grads(module, input, output):
            output.requires_grad_(True)

        self._require_grads_hook = self.get_input_embeddings().register_forward_hook(make_inputs_require_grads)

    def disable_input_require_grads(self):
        """
        Removes the `_require_grads_hook`.
        """
        self._require_grads_hook.remove()

1209
    def get_input_embeddings(self) -> nn.Module:
1210
1211
1212
1213
        """
        Returns the model's input embeddings.

        Returns:
1214
            `nn.Module`: A torch module mapping vocabulary to hidden states.
thomwolf's avatar
thomwolf committed
1215
        """
1216
        base_model = getattr(self, self.base_model_prefix, self)
thomwolf's avatar
thomwolf committed
1217
1218
1219
1220
        if base_model is not self:
            return base_model.get_input_embeddings()
        else:
            raise NotImplementedError
thomwolf's avatar
thomwolf committed
1221

1222
    def set_input_embeddings(self, value: nn.Module):
1223
        """
Sylvain Gugger's avatar
Sylvain Gugger committed
1224
        Set model's input embeddings.
1225
1226

        Args:
1227
            value (`nn.Module`): A module mapping vocabulary to hidden states.
thomwolf's avatar
thomwolf committed
1228
1229
1230
1231
1232
1233
        """
        base_model = getattr(self, self.base_model_prefix, self)
        if base_model is not self:
            base_model.set_input_embeddings(value)
        else:
            raise NotImplementedError
thomwolf's avatar
thomwolf committed
1234

1235
    def get_output_embeddings(self) -> nn.Module:
1236
1237
1238
1239
        """
        Returns the model's output embeddings.

        Returns:
1240
            `nn.Module`: A torch module mapping hidden states to vocabulary.
thomwolf's avatar
thomwolf committed
1241
        """
1242
        return None  # Overwrite for models with output embeddings
thomwolf's avatar
thomwolf committed
1243

1244
1245
1246
1247
    def _init_weights(self, module):
        """
        Initialize the weights. This method should be overridden by derived class.
        """
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
        pass

    def _initialize_weights(self, module):
        """
        Initialize the weights if they are not already initialized.
        """
        if getattr(module, "_is_hf_initialized", False):
            return
        self._init_weights(module)
        module._is_hf_initialized = True
1258

1259
    def tie_weights(self):
1260
1261
        """
        Tie the weights between the input embeddings and the output embeddings.
1262

Sylvain Gugger's avatar
Sylvain Gugger committed
1263
1264
        If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning the
        weights instead.
thomwolf's avatar
thomwolf committed
1265
        """
1266
1267
1268
1269
        if getattr(self.config, "tie_word_embeddings", True):
            output_embeddings = self.get_output_embeddings()
            if output_embeddings is not None:
                self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())
thomwolf's avatar
thomwolf committed
1270

1271
        if getattr(self.config, "is_encoder_decoder", False) and getattr(self.config, "tie_encoder_decoder", False):
Weizhen's avatar
Weizhen committed
1272
1273
            if hasattr(self, self.base_model_prefix):
                self = getattr(self, self.base_model_prefix)
1274
1275
            self._tie_encoder_decoder_weights(self.encoder, self.decoder, self.base_model_prefix)

Sylvain Gugger's avatar
Sylvain Gugger committed
1276
1277
1278
1279
        for module in self.modules():
            if hasattr(module, "_tie_weights"):
                module._tie_weights()

1280
1281
1282
    @staticmethod
    def _tie_encoder_decoder_weights(encoder: nn.Module, decoder: nn.Module, base_model_prefix: str):
        uninitialized_encoder_weights: List[str] = []
Weizhen's avatar
Weizhen committed
1283
1284
        if decoder.__class__ != encoder.__class__:
            logger.info(
Sylvain Gugger's avatar
Sylvain Gugger committed
1285
1286
                f"{decoder.__class__} and {encoder.__class__} are not equal. In this case make sure that all encoder"
                " weights are correctly initialized."
Weizhen's avatar
Weizhen committed
1287
            )
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297

        def tie_encoder_to_decoder_recursively(
            decoder_pointer: nn.Module,
            encoder_pointer: nn.Module,
            module_name: str,
            uninitialized_encoder_weights: List[str],
            depth=0,
        ):
            assert isinstance(decoder_pointer, nn.Module) and isinstance(
                encoder_pointer, nn.Module
1298
            ), f"{decoder_pointer} and {encoder_pointer} have to be of type nn.Module"
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
            if hasattr(decoder_pointer, "weight"):
                assert hasattr(encoder_pointer, "weight")
                encoder_pointer.weight = decoder_pointer.weight
                if hasattr(decoder_pointer, "bias"):
                    assert hasattr(encoder_pointer, "bias")
                    encoder_pointer.bias = decoder_pointer.bias
                return

            encoder_modules = encoder_pointer._modules
            decoder_modules = decoder_pointer._modules
            if len(decoder_modules) > 0:
                assert (
                    len(encoder_modules) > 0
                ), f"Encoder module {encoder_pointer} does not match decoder module {decoder_pointer}"

1314
                all_encoder_weights = {module_name + "/" + sub_name for sub_name in encoder_modules.keys()}
1315
1316
1317
1318
1319
                encoder_layer_pos = 0
                for name, module in decoder_modules.items():
                    if name.isdigit():
                        encoder_name = str(int(name) + encoder_layer_pos)
                        decoder_name = name
Weizhen's avatar
Weizhen committed
1320
1321
1322
                        if not isinstance(decoder_modules[decoder_name], type(encoder_modules[encoder_name])) and len(
                            encoder_modules
                        ) != len(decoder_modules):
1323
1324
                            # this can happen if the name corresponds to the position in a list module list of layers
                            # in this case the decoder has added a cross-attention that the encoder does not have
1325
                            # thus skip this step and subtract one layer pos from encoder
1326
1327
1328
1329
1330
1331
                            encoder_layer_pos -= 1
                            continue
                    elif name not in encoder_modules:
                        continue
                    elif depth > 500:
                        raise ValueError(
Sylvain Gugger's avatar
Sylvain Gugger committed
1332
1333
                            "Max depth of recursive function `tie_encoder_to_decoder` reached. It seems that there is"
                            " a circular dependency between two or more `nn.Modules` of your model."
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
                        )
                    else:
                        decoder_name = encoder_name = name
                    tie_encoder_to_decoder_recursively(
                        decoder_modules[decoder_name],
                        encoder_modules[encoder_name],
                        module_name + "/" + name,
                        uninitialized_encoder_weights,
                        depth=depth + 1,
                    )
                    all_encoder_weights.remove(module_name + "/" + encoder_name)

                uninitialized_encoder_weights += list(all_encoder_weights)

        # tie weights recursively
        tie_encoder_to_decoder_recursively(decoder, encoder, base_model_prefix, uninitialized_encoder_weights)
        if len(uninitialized_encoder_weights) > 0:
            logger.warning(
                f"The following encoder weights were not tied to the decoder {uninitialized_encoder_weights}"
            )

1355
    def _tie_or_clone_weights(self, output_embeddings, input_embeddings):
Lysandre's avatar
Lysandre committed
1356
        """Tie or clone module weights depending of whether we are using TorchScript or not"""
thomwolf's avatar
thomwolf committed
1357
        if self.config.torchscript:
1358
            output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())
thomwolf's avatar
thomwolf committed
1359
        else:
1360
            output_embeddings.weight = input_embeddings.weight
thomwolf's avatar
thomwolf committed
1361

Sam Shleifer's avatar
Sam Shleifer committed
1362
        if getattr(output_embeddings, "bias", None) is not None:
1363
            output_embeddings.bias.data = nn.functional.pad(
1364
                output_embeddings.bias.data,
Lysandre's avatar
Lysandre committed
1365
1366
1367
1368
                (
                    0,
                    output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0],
                ),
1369
1370
                "constant",
                0,
1371
            )
1372
        if hasattr(output_embeddings, "out_features") and hasattr(input_embeddings, "num_embeddings"):
1373
            output_embeddings.out_features = input_embeddings.num_embeddings
1374

1375
    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
1376
        """
1377
        Resizes input token embeddings matrix of the model if `new_num_tokens != config.vocab_size`.
1378

1379
        Takes care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.
thomwolf's avatar
thomwolf committed
1380

1381
        Arguments:
1382
            new_num_tokens (`int`, *optional*):
1383
                The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
Sylvain Gugger's avatar
Sylvain Gugger committed
1384
1385
                vectors at the end. Reducing the size will remove vectors from the end. If not provided or `None`, just
                returns a pointer to the input tokens `torch.nn.Embedding` module of the model without doing anything.
1386
1387

        Return:
1388
            `torch.nn.Embedding`: Pointer to the input tokens Embeddings Module of the model.
thomwolf's avatar
thomwolf committed
1389
        """
1390
        model_embeds = self._resize_token_embeddings(new_num_tokens)
thomwolf's avatar
thomwolf committed
1391
1392
        if new_num_tokens is None:
            return model_embeds
thomwolf's avatar
thomwolf committed
1393
1394
1395

        # Update base model and current model config
        self.config.vocab_size = new_num_tokens
1396
        self.vocab_size = new_num_tokens
thomwolf's avatar
thomwolf committed
1397
1398

        # Tie weights again if needed
1399
        self.tie_weights()
thomwolf's avatar
thomwolf committed
1400

thomwolf's avatar
thomwolf committed
1401
1402
        return model_embeds

1403
    def _resize_token_embeddings(self, new_num_tokens):
thomwolf's avatar
thomwolf committed
1404
1405
1406
        old_embeddings = self.get_input_embeddings()
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.set_input_embeddings(new_embeddings)
1407
1408
1409
1410
1411
1412
1413

        # if word embeddings are not tied, make sure that lm head is resized as well
        if self.get_output_embeddings() is not None and not self.config.tie_word_embeddings:
            old_lm_head = self.get_output_embeddings()
            new_lm_head = self._get_resized_lm_head(old_lm_head, new_num_tokens)
            self.set_output_embeddings(new_lm_head)

thomwolf's avatar
thomwolf committed
1414
        return self.get_input_embeddings()
1415

1416
    def _get_resized_embeddings(
1417
1418
        self, old_embeddings: nn.Embedding, new_num_tokens: Optional[int] = None
    ) -> nn.Embedding:
1419
1420
1421
        """
        Build a resized Embedding Module from a provided token Embedding Module. Increasing the size will add newly
        initialized vectors at the end. Reducing the size will remove vectors from the end
1422
1423

        Args:
1424
            old_embeddings (`torch.nn.Embedding`):
1425
                Old embeddings to be resized.
1426
            new_num_tokens (`int`, *optional*):
1427
                New number of tokens in the embedding matrix.
1428
1429

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
1430
                vectors from the end. If not provided or `None`, just returns a pointer to the input tokens
1431
                `torch.nn.Embedding` module of the model without doing anything.
1432
1433

        Return:
1434
1435
            `torch.nn.Embedding`: Pointer to the resized Embedding Module or the old Embedding Module if
            `new_num_tokens` is `None`
1436
1437
1438
1439
        """
        if new_num_tokens is None:
            return old_embeddings

1440
1441
1442
1443
1444
1445
1446
1447
        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=None):
                old_num_tokens, old_embedding_dim = old_embeddings.weight.size()
        else:
            old_num_tokens, old_embedding_dim = old_embeddings.weight.size()

1448
1449
1450
        if old_num_tokens == new_num_tokens:
            return old_embeddings

1451
1452
        if not isinstance(old_embeddings, nn.Embedding):
            raise TypeError(
Sylvain Gugger's avatar
Sylvain Gugger committed
1453
1454
1455
                f"Old embeddings are of type {type(old_embeddings)}, which is not an instance of {nn.Embedding}. You"
                " should either use a different resize function or make sure that `old_embeddings` are an instance of"
                f" {nn.Embedding}."
1456
1457
            )

1458
        # Build new embeddings
1459
        new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)
1460
        new_embeddings.to(old_embeddings.weight.device, dtype=old_embeddings.weight.dtype)
1461
1462
1463
1464

        # initialize all new embeddings (in particular added tokens)
        self._init_weights(new_embeddings)

1465
        # Copy token embeddings from the previous weights
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476

        # numbers of tokens to copy
        n = min(old_num_tokens, new_num_tokens)
        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_embeddings.weight, modifier_rank=0):
                if torch.distributed.get_rank() == 0:
                    new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
        else:
            new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
1477
1478
1479

        return new_embeddings

1480
    def _get_resized_lm_head(
1481
1482
        self, old_lm_head: nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False
    ) -> nn.Linear:
1483
1484
1485
1486
1487
        """
        Build a resized Linear Module from a provided old Linear Module. Increasing the size will add newly initialized
        vectors at the end. Reducing the size will remove vectors from the end

        Args:
1488
            old_lm_head (`torch.nn.Linear`):
1489
                Old lm head liner layer to be resized.
1490
            new_num_tokens (`int`, *optional*):
1491
1492
1493
                New number of tokens in the linear matrix.

                Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
1494
                vectors from the end. If not provided or `None`, just returns a pointer to the input tokens
1495
1496
1497
                `torch.nn.Linear` module of the model without doing anything. transposed (`bool`, *optional*, defaults
                to `False`): Whether `old_lm_head` is transposed or not. If True `old_lm_head.size()` is `lm_head_dim,
                vocab_size` else `vocab_size, lm_head_dim`.
1498
1499

        Return:
Sylvain Gugger's avatar
Sylvain Gugger committed
1500
1501
            `torch.nn.Linear`: Pointer to the resized Linear Module or the old Linear Module if `new_num_tokens` is
            `None`
1502
1503
1504
1505
        """
        if new_num_tokens is None:
            return old_lm_head

1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
        if is_deepspeed_zero3_enabled():
            import deepspeed

            with deepspeed.zero.GatheredParameters(old_lm_head.weight, modifier_rank=None):
                old_num_tokens, old_lm_head_dim = (
                    old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
                )
        else:
            old_num_tokens, old_lm_head_dim = (
                old_lm_head.weight.size() if not transposed else old_lm_head.weight.t().size()
            )
1517
1518
1519
1520
1521
1522

        if old_num_tokens == new_num_tokens:
            return old_lm_head

        if not isinstance(old_lm_head, nn.Linear):
            raise TypeError(
Sylvain Gugger's avatar
Sylvain Gugger committed
1523
1524
1525
                f"Old language model head is of type {type(old_lm_head)}, which is not an instance of {nn.Linear}. You"
                " should either use a different resize function or make sure that `old_lm_head` are an instance of"
                f" {nn.Linear}."
1526
1527
1528
1529
1530
            )

        # Build new lm head
        new_lm_head_shape = (old_lm_head_dim, new_num_tokens) if not transposed else (new_num_tokens, old_lm_head_dim)
        has_new_lm_head_bias = old_lm_head.bias is not None
1531
        new_lm_head = nn.Linear(*new_lm_head_shape, bias=has_new_lm_head_bias)
1532
        new_lm_head = new_lm_head.to(old_lm_head.weight.device, dtype=old_lm_head.weight.dtype)
1533
1534
1535
1536
1537
1538

        # initialize new lm head (in particular added tokens)
        self._init_weights(new_lm_head)

        num_tokens_to_copy = min(old_num_tokens, new_num_tokens)

1539
1540
1541
1542
        # XXX: put the long block of code in a wrapper
        if is_deepspeed_zero3_enabled():
            import deepspeed

1543
1544
            params = [old_lm_head.weight, old_lm_head.bias, new_lm_head.weight, new_lm_head.bias]
            with deepspeed.zero.GatheredParameters(params, modifier_rank=0):
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
                if torch.distributed.get_rank() == 0:
                    # Copy old lm head weights to new lm head
                    if not transposed:
                        new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[
                            :num_tokens_to_copy, :
                        ]
                    else:
                        new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[
                            :, :num_tokens_to_copy
                        ]

                    # Copy bias weights to new lm head
                    if has_new_lm_head_bias:
                        new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy]
1559
        else:
1560
1561
1562
1563
1564
            # Copy old lm head weights to new lm head
            if not transposed:
                new_lm_head.weight.data[:num_tokens_to_copy, :] = old_lm_head.weight.data[:num_tokens_to_copy, :]
            else:
                new_lm_head.weight.data[:, :num_tokens_to_copy] = old_lm_head.weight.data[:, :num_tokens_to_copy]
1565

1566
1567
1568
            # Copy bias weights to new lm head
            if has_new_lm_head_bias:
                new_lm_head.bias.data[:num_tokens_to_copy] = old_lm_head.bias.data[:num_tokens_to_copy]
1569
1570
1571

        return new_lm_head

1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
    def resize_position_embeddings(self, new_num_position_embeddings: int):
        raise NotImplementedError(
            f"`resize_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should "
            f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`"
        )

    def get_position_embeddings(self) -> Union[nn.Embedding, Tuple[nn.Embedding]]:
        raise NotImplementedError(
            f"`get_position_embeddings` is not implemented for {self.__class__}`. To implement it, you should "
            f"overwrite this method in the class {self.__class__} in `modeling_{self.__class__.__module__}.py`"
        )

1584
    def init_weights(self):
1585
        """
1586
1587
        If needed prunes and maybe initializes weights. If using a custom `PreTrainedModel`, you need to implement any
        initialization logic in `_init_weights`.
1588
        """
1589
1590
1591
1592
        # Prune heads if needed
        if self.config.pruned_heads:
            self.prune_heads(self.config.pruned_heads)

1593
1594
        if _init_weights:
            # Initialize weights
1595
            self.apply(self._initialize_weights)
1596
1597
1598
1599

            # Tie weights should be skipped when not initializing all weights
            # since from_pretrained(...) calls tie weights anyways
            self.tie_weights()
1600

1601
1602
1603
    def prune_heads(self, heads_to_prune: Dict[int, List[int]]):
        """
        Prunes heads of the base model.
1604

1605
        Arguments:
1606
            heads_to_prune (`Dict[int, List[int]]`):
Sylvain Gugger's avatar
Sylvain Gugger committed
1607
1608
1609
                Dictionary with keys being selected layer indices (`int`) and associated values being the list of heads
                to prune in said layer (list of `int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on
                layer 1 and heads 2 and 3 on layer 2.
thomwolf's avatar
thomwolf committed
1610
        """
1611
        # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads
1612
        for layer, heads in heads_to_prune.items():
1613
1614
1615
            union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)
            self.config.pruned_heads[layer] = list(union_heads)  # Unfortunately we have to store it as list for JSON

1616
        self.base_model._prune_heads(heads_to_prune)
thomwolf's avatar
thomwolf committed
1617

1618
    def gradient_checkpointing_enable(self):
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
        """
        Activates gradient checkpointing for the current model.

        Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
        activations".
        """
        if not self.supports_gradient_checkpointing:
            raise ValueError(f"{self.__class__.__name__} does not support gradient checkpointing.")
        self.apply(partial(self._set_gradient_checkpointing, value=True))

1629
    def gradient_checkpointing_disable(self):
1630
1631
1632
1633
1634
1635
1636
1637
1638
        """
        Deactivates gradient checkpointing for the current model.

        Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
        activations".
        """
        if self.supports_gradient_checkpointing:
            self.apply(partial(self._set_gradient_checkpointing, value=False))

1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
    @property
    def is_gradient_checkpointing(self) -> bool:
        """
        Whether gradient checkpointing is activated for this model or not.

        Note that in other frameworks this feature can be referred to as "activation checkpointing" or "checkpoint
        activations".
        """
        return any(hasattr(m, "gradient_checkpointing") and m.gradient_checkpointing for m in self.modules())

1649
1650
1651
    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
1652
        is_main_process: bool = True,
1653
1654
        state_dict: Optional[dict] = None,
        save_function: Callable = torch.save,
Sylvain Gugger's avatar
Sylvain Gugger committed
1655
        push_to_hub: bool = False,
Sylvain Gugger's avatar
Sylvain Gugger committed
1656
        max_shard_size: Union[int, str] = "10GB",
1657
        safe_serialization: bool = False,
1658
        variant: Optional[str] = None,
Sylvain Gugger's avatar
Sylvain Gugger committed
1659
        **kwargs,
1660
    ):
1661
1662
        """
        Save a model and its configuration file to a directory, so that it can be re-loaded using the
1663
        [`~PreTrainedModel.from_pretrained`] class method.
1664

1665
        Arguments:
1666
            save_directory (`str` or `os.PathLike`):
1667
                Directory to which to save. Will be created if it doesn't exist.
1668
1669
1670
1671
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful when in distributed training like
                TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on
                the main process to avoid race conditions.
1672
            state_dict (nested dictionary of `torch.Tensor`):
Sylvain Gugger's avatar
Sylvain Gugger committed
1673
1674
1675
                The state dictionary of the model to save. Will default to `self.state_dict()`, but can be used to only
                save parts of the model or if special precautions need to be taken when recovering the state dictionary
                of a model (like when using model parallelism).
1676
            save_function (`Callable`):
1677
                The function to use to save the state dictionary. Useful on distributed training like TPUs when one
1678
1679
                need to replace `torch.save` by another method.
            push_to_hub (`bool`, *optional*, defaults to `False`):
1680
1681
1682
                Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
                repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
                namespace).
Sylvain Gugger's avatar
Sylvain Gugger committed
1683
1684
1685
1686
1687
1688
1689
1690
1691
1692
1693
            max_shard_size (`int` or `str`, *optional*, defaults to `"10GB"`):
                The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size
                lower than this size. If expressed as a string, needs to be digits followed by a unit (like `"5MB"`).

                <Tip warning={true}>

                If a single weight of the model is bigger than `max_shard_size`, it will be in its own checkpoint shard
                which will be bigger than `max_shard_size`.

                </Tip>

1694
1695
            safe_serialization (`bool`, *optional*, defaults to `False`):
                Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).
1696
1697
            variant (`str`, *optional*):
                If specified, weights are saved in the format pytorch_model.<variant>.bin.
1698

Sylvain Gugger's avatar
Sylvain Gugger committed
1699
            kwargs:
1700
                Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
1701
        """
1702
1703
1704
1705
1706
1707
1708
1709
        # Checks if the model has been loaded in 8-bit
        if getattr(self, "is_loaded_in_8bit", False):
            warnings.warn(
                "You are calling `save_pretrained` to a 8-bit converted model you may likely encounter unexepected"
                " behaviors. ",
                UserWarning,
            )

1710
1711
1712
1713
1714
        if "save_config" in kwargs:
            warnings.warn(
                "`save_config` is deprecated and will be removed in v5 of Transformers. Use `is_main_process` instead."
            )
            is_main_process = kwargs.pop("save_config")
1715
1716
        if safe_serialization and not is_safetensors_available():
            raise ImportError("`safe_serialization` requires the `safetensors library: `pip install safetensors`.")
1717

1718
        if os.path.isfile(save_directory):
1719
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
1720
            return
1721

1722
1723
        os.makedirs(save_directory, exist_ok=True)

1724
1725
        if push_to_hub:
            commit_message = kwargs.pop("commit_message", None)
1726
            repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
1727
            repo_id = self._create_repo(repo_id, **kwargs)
1728
            files_timestamps = self._get_files_timestamps(save_directory)
1729

Julien Chaumond's avatar
Julien Chaumond committed
1730
        # Only save the model itself if we are using distributed training
1731
        model_to_save = unwrap_model(self)
1732

1733
1734
1735
1736
1737
        # save the string version of dtype to the config, e.g. convert torch.float32 => "float32"
        # we currently don't use this setting automatically, but may start to use with v5
        dtype = get_parameter_dtype(model_to_save)
        model_to_save.config.torch_dtype = str(dtype).split(".")[1]

Julien Chaumond's avatar
Julien Chaumond committed
1738
1739
1740
        # Attach architecture to the config
        model_to_save.config.architectures = [model_to_save.__class__.__name__]

1741
1742
1743
1744
1745
        # If we have a custom model, we copy the file defining it in the folder and set the attributes so it can be
        # loaded from the Hub.
        if self._auto_class is not None:
            custom_object_save(self, save_directory, config=self.config)

1746
        # Save the config
1747
        if is_main_process:
1748
            model_to_save.config.save_pretrained(save_directory)
1749
1750
            if self.can_generate():
                model_to_save.generation_config.save_pretrained(save_directory)
1751
1752
1753
1754

        # Save the model
        if state_dict is None:
            state_dict = model_to_save.state_dict()
1755

1756
1757
1758
1759
1760
        # Translate state_dict from smp to hf if saving with smp >= 1.10
        if IS_SAGEMAKER_MP_POST_1_10:
            for smp_to_hf, _ in smp.state.module_manager.translate_functions:
                state_dict = smp_to_hf(state_dict)

1761
        # Handle the case where some state_dict keys shouldn't be saved
1762
        if self._keys_to_ignore_on_save is not None:
1763
            for ignore_key in self._keys_to_ignore_on_save:
1764
1765
                if ignore_key in state_dict.keys():
                    del state_dict[ignore_key]
1766
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787
1788
1789
1790
1791
1792
1793
1794
1795
1796
1797
1798
1799
1800
        if safe_serialization:
            # Safetensors does not allow tensor aliasing.
            # We're going to remove aliases before saving
            ptrs = collections.defaultdict(list)
            for name, tensor in state_dict.items():
                ptrs[tensor.data_ptr()].append(name)

            # These are all the pointers of shared tensors.
            shared_ptrs = {ptr: names for ptr, names in ptrs.items() if len(names) > 1}
            warn_names = set()
            for names in shared_ptrs.values():
                # Removing the keys which are declared as known duplicates on
                # load. This allows to make sure the name which is kept is consistent.
                if self._keys_to_ignore_on_load_missing is not None:
                    for name in names:
                        matches_pattern = any(re.search(pat, name) for pat in self._keys_to_ignore_on_load_missing)
                        if matches_pattern and name in state_dict:
                            del state_dict[name]

                # When not all duplicates have been cleaned, still remove those keys, but put a clear warning.
                # If the link between tensors was done at runtime then `from_pretrained` will not get
                # the key back leading to random tensor. A proper warning will be shown
                # during reload (if applicable), but since the file is not necessarily compatible with
                # the config, better show a proper warning.
                found = 0
                for name in names:
                    if name in state_dict:
                        found += 1
                        if found > 1:
                            del state_dict[name]
                            warn_names.add(name)
            if len(warn_names) > 0:
                logger.warning_once(
                    f"Removed shared tensor {warn_names} while saving. This should be OK, but check by verifying that you don't receive any warning while reloading",
                )
1801

Sylvain Gugger's avatar
Sylvain Gugger committed
1802
        # Shard the model if it is too big.
1803
        weights_name = SAFE_WEIGHTS_NAME if safe_serialization else WEIGHTS_NAME
1804
1805
        weights_name = _add_variant(weights_name, variant)

1806
        shards, index = shard_checkpoint(state_dict, max_shard_size=max_shard_size, weights_name=weights_name)
Sylvain Gugger's avatar
Sylvain Gugger committed
1807
1808
1809
1810

        # Clean the folder from a previous save
        for filename in os.listdir(save_directory):
            full_filename = os.path.join(save_directory, filename)
1811
1812
            # If we have a shard file that is not going to be replaced, we delete it, but only from the main process
            # in distributed settings to avoid race conditions.
1813
            weights_no_suffix = weights_name.replace(".bin", "").replace(".safetensors", "")
1814
1815
1816
1817
1818

            # make sure that file to be deleted matches format of sharded file, e.g. pytorch_model-00001-of-00005
            filename_no_suffix = filename.replace(".bin", "").replace(".safetensors", "")
            reg = re.compile("(.*?)-\d{5}-of-\d{5}")

1819
            if (
1820
                filename.startswith(weights_no_suffix)
1821
1822
1823
                and os.path.isfile(full_filename)
                and filename not in shards.keys()
                and is_main_process
1824
                and reg.fullmatch(filename_no_suffix) is not None
1825
            ):
Sylvain Gugger's avatar
Sylvain Gugger committed
1826
                os.remove(full_filename)
1827

Sylvain Gugger's avatar
Sylvain Gugger committed
1828
1829
        # Save the model
        for shard_file, shard in shards.items():
1830
1831
1832
1833
1834
1835
            if safe_serialization:
                # At some point we will need to deal better with save_function (used for TPU and other distributed
                # joyfulness), but for now this enough.
                safe_save_file(shard, os.path.join(save_directory, shard_file), metadata={"format": "pt"})
            else:
                save_function(shard, os.path.join(save_directory, shard_file))
Sylvain Gugger's avatar
Sylvain Gugger committed
1836
1837

        if index is None:
1838
1839
            path_to_weights = os.path.join(save_directory, _add_variant(WEIGHTS_NAME, variant))
            logger.info(f"Model weights saved in {path_to_weights}")
Sylvain Gugger's avatar
Sylvain Gugger committed
1840
        else:
1841
            save_index_file = SAFE_WEIGHTS_INDEX_NAME if safe_serialization else WEIGHTS_INDEX_NAME
1842
            save_index_file = os.path.join(save_directory, _add_variant(save_index_file, variant))
Sylvain Gugger's avatar
Sylvain Gugger committed
1843
1844
1845
1846
1847
1848
1849
1850
1851
            # Save the index as well
            with open(save_index_file, "w", encoding="utf-8") as f:
                content = json.dumps(index, indent=2, sort_keys=True) + "\n"
                f.write(content)
            logger.info(
                f"The model is bigger than the maximum size per checkpoint ({max_shard_size}) and is going to be "
                f"split in {len(shards)} checkpoint shards. You can find where each parameters has been saved in the "
                f"index located at {save_index_file}."
            )
1852

Sylvain Gugger's avatar
Sylvain Gugger committed
1853
        if push_to_hub:
1854
            self._upload_modified_files(
1855
1856
1857
1858
1859
                save_directory,
                repo_id,
                files_timestamps,
                commit_message=commit_message,
                token=kwargs.get("use_auth_token"),
1860
            )
Sylvain Gugger's avatar
Sylvain Gugger committed
1861

1862
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872
1873
1874
1875
1876
1877
1878
1879
    def get_memory_footprint(self, return_buffers=True):
        r"""
        Get the memory footprint of a model. This will return the memory footprint of the current model in bytes.
        Useful to benchmark the memory footprint of the current model and design some tests. Solution inspired from the
        PyTorch discussions: https://discuss.pytorch.org/t/gpu-memory-that-model-uses/56822/2

        Arguments:
            return_buffers (`bool`, *optional*, defaults to `True`):
                Whether to return the size of the buffer tensors in the computation of the memory footprint. Buffers
                are tensors that do not require gradients and not registered as parameters. E.g. mean and std in batch
                norm layers. Please see: https://discuss.pytorch.org/t/what-pytorch-means-by-buffers/120266/2
        """
        mem = sum([param.nelement() * param.element_size() for param in self.parameters()])
        if return_buffers:
            mem_bufs = sum([buf.nelement() * buf.element_size() for buf in self.buffers()])
            mem = mem + mem_bufs
        return mem

1880
1881
1882
1883
1884
1885
1886
1887
1888
1889
1890
1891
1892
1893
1894
1895
1896
1897
1898
1899
1900
1901
1902
1903
1904
1905
1906
1907
1908
1909
    def to(self, *args, **kwargs):
        # Checks if the model has been loaded in 8-bit
        if getattr(self, "is_loaded_in_8bit", False):
            raise ValueError(
                "`.to` is not supported for `8-bit` models. Please use the model as it is, since the"
                " model has already been set to the correct devices and casted to the correct `dtype`."
            )
        else:
            return super().to(*args, **kwargs)

    def half(self, *args):
        # Checks if the model has been loaded in 8-bit
        if getattr(self, "is_loaded_in_8bit", False):
            raise ValueError(
                "`.half()` is not supported for `8-bit` models. Please use the model as it is, since the"
                " model has already been casted to the correct `dtype`."
            )
        else:
            return super().half(*args)

    def float(self, *args):
        # Checks if the model has been loaded in 8-bit
        if getattr(self, "is_loaded_in_8bit", False):
            raise ValueError(
                "`.float()` is not supported for `8-bit` models. Please use the model as it is, since the"
                " model has already been casted to the correct `dtype`."
            )
        else:
            return super().float(*args)

1910
    @classmethod
1911
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], *model_args, **kwargs):
1912
1913
        r"""
        Instantiate a pretrained pytorch model from a pre-trained model configuration.
1914

Sylvain Gugger's avatar
Sylvain Gugger committed
1915
1916
        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To train
        the model, you should first set it back in training mode with `model.train()`.
1917

1918
        The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
1919
1920
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.
1921

1922
        The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
1923
        weights are discarded.
1924

1925
        Parameters:
1926
            pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
1927
1928
                Can be either:

1929
                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
Sylvain Gugger's avatar
Sylvain Gugger committed
1930
1931
                      Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a
                      user or organization name, like `dbmdz/bert-base-german-cased`.
1932
1933
1934
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *tensorflow index checkpoint file* (e.g, `./tf_model/model.ckpt.index`). In
Sylvain Gugger's avatar
Sylvain Gugger committed
1935
1936
1937
                      this case, `from_tf` should be set to `True` and a configuration object should be provided as
                      `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
                      PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.
1938
                    - A path or url to a model folder containing a *flax checkpoint file* in *.msgpack* format (e.g,
Sylvain Gugger's avatar
Sylvain Gugger committed
1939
1940
                      `./flax_model/` containing `flax_model.msgpack`). In this case, `from_flax` should be set to
                      `True`.
1941
1942
1943
1944
1945
                    - `None` if you are both providing the configuration and state dictionary (resp. with keyword
                      arguments `config` and `state_dict`).
            model_args (sequence of positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.
            config (`Union[PretrainedConfig, str, os.PathLike]`, *optional*):
1946
1947
                Can be either:

1948
1949
                    - an instance of a class derived from [`PretrainedConfig`],
                    - a string or path valid as input to [`~PretrainedConfig.from_pretrained`].
1950

1951
                Configuration for the model to use instead of an automatically loaded configuration. Configuration can
1952
1953
                be automatically loaded when:

1954
                    - The model is a model provided by the library (loaded with the *model id* string of a pretrained
1955
                      model).
Sylvain Gugger's avatar
Sylvain Gugger committed
1956
1957
                    - The model was saved using [`~PreTrainedModel.save_pretrained`] and is reloaded by supplying the
                      save directory.
1958
1959
1960
                    - The model is loaded by supplying a local directory as `pretrained_model_name_or_path` and a
                      configuration JSON file named *config.json* is found in the directory.
            state_dict (`Dict[str, torch.Tensor]`, *optional*):
1961
1962
1963
                A state dictionary to use instead of a state dictionary loaded from saved weights file.

                This option can be used if you want to create a model from a pretrained configuration but load your own
Sylvain Gugger's avatar
Sylvain Gugger committed
1964
                weights. In this case though, you should check if using [`~PreTrainedModel.save_pretrained`] and
1965
1966
                [`~PreTrainedModel.from_pretrained`] is not a simpler option.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
1967
1968
                Path to a directory in which a downloaded pretrained model configuration should be cached if the
                standard cache should not be used.
1969
            from_tf (`bool`, *optional*, defaults to `False`):
1970
                Load the model weights from a TensorFlow checkpoint save file (see docstring of
1971
1972
                `pretrained_model_name_or_path` argument).
            from_flax (`bool`, *optional*, defaults to `False`):
1973
                Load the model weights from a Flax checkpoint save file (see docstring of
1974
1975
                `pretrained_model_name_or_path` argument).
            ignore_mismatched_sizes (`bool`, *optional*, defaults to `False`):
1976
1977
1978
                Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
                as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
                checkpoint with 3 labels).
1979
            force_download (`bool`, *optional*, defaults to `False`):
1980
1981
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
1982
            resume_download (`bool`, *optional*, defaults to `False`):
1983
1984
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
1985
            proxies (`Dict[str, str]`, *optional*):
Sylvain Gugger's avatar
Sylvain Gugger committed
1986
1987
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
1988
            output_loading_info(`bool`, *optional*, defaults to `False`):
Sylvain Gugger's avatar
Sylvain Gugger committed
1989
                Whether ot not to also return a dictionary containing missing keys, unexpected keys and error messages.
1990
            local_files_only(`bool`, *optional*, defaults to `False`):
Stas Bekman's avatar
Stas Bekman committed
1991
                Whether or not to only look at local files (i.e., do not try to download the model).
1992
1993
1994
            use_auth_token (`str` or `bool`, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
                the token generated when running `huggingface-cli login` (stored in `~/.huggingface`).
1995
            revision (`str`, *optional*, defaults to `"main"`):
Julien Chaumond's avatar
Julien Chaumond committed
1996
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
1997
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
Julien Chaumond's avatar
Julien Chaumond committed
1998
                identifier allowed by git.
1999
2000
2001
2002
2003
2004
2005

                <Tip>

                To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>".

                </Tip>

2006
            mirror (`str`, *optional*):
Sylvain Gugger's avatar
Sylvain Gugger committed
2007
2008
2009
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety.
                Please refer to the mirror site for more information.
2010
            _fast_init(`bool`, *optional*, defaults to `True`):
2011
2012
                Whether or not to disable fast initialization.

2013
2014
                <Tip warning={true}>

Sylvain Gugger's avatar
Sylvain Gugger committed
2015
2016
2017
                One should only disable *_fast_init* to ensure backwards compatibility with `transformers.__version__ <
                4.6.0` for seeded model initialization. This argument will be removed at the next major version. See
                [pull request 11471](https://github.com/huggingface/transformers/pull/11471) for more information.
2018

2019
                </Tip>
2020

2021
2022
2023
            > Parameters for big model inference

            low_cpu_mem_usage(`bool`, *optional*):
2024
2025
2026
                Tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
                This is an experimental feature and a subject to change at any moment.
            torch_dtype (`str` or `torch.dtype`, *optional*):
2027
2028
2029
2030
2031
2032
2033
2034
2035
2036
2037
2038
2039
2040
2041
2042
2043
2044
2045
2046
2047
                Override the default `torch.dtype` and load the model under a specific `dtype`. The different options
                are:

                1. `torch.float16` or `torch.bfloat16` or `torch.float`: load in a specified
                  `dtype`, ignoring the model's `config.torch_dtype` if one exists. If not specified
                  - the model will get loaded in `torch.float` (fp32).

                2. `"auto"` - A `torch_dtype` entry in the `config.json` file of the model will be
                  attempted to be used. If this entry isn't found then next check the `dtype` of the first weight in
                  the checkpoint that's of a floating point type and use that as `dtype`. This will load the model
                  using the `dtype` it was saved in at the end of the training. It can't be used as an indicator of how
                  the model was trained. Since it could be trained in one of half precision dtypes, but saved in fp32.

                <Tip>

                For some models the `dtype` they were trained in is unknown - you may try to check the model's paper or
                reach out to the authors and ask them to add this information to the model's card and to insert the
                `torch_dtype` entry in `config.json` on the hub.

                </Tip>

2048
2049
2050
2051
2052
            device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
                A map that specifies where each submodule should go. It doesn't need to be refined to each
                parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
                same device.

2053
2054
                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
                more information about each option see [designing a device
Patrick von Platen's avatar
Patrick von Platen committed
2055
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
2056
2057
2058
            max_memory (`Dict`, *optional*):
                A dictionary device identifier to maximum memory. Will default to the maximum memory available for each
                GPU and the available CPU RAM if unset.
2059
2060
            offload_folder (`str` or `os.PathLike`, *optional*):
                If the `device_map` contains any value `"disk"`, the folder where we will offload weights.
2061
            offload_state_dict (`bool`, *optional*):
2062
                If `True`, will temporarily offload the CPU state dict to the hard drive to avoid getting out of CPU
2063
2064
                RAM if the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to
                `True` when there is some disk offload.
2065
2066
2067
2068
2069
2070
            load_in_8bit (`bool`, *optional*, defaults to `False`):
                If `True`, will convert the loaded model into mixed-8bit quantized model. To use this feature please
                install `bitsandbytes` compiled with your CUDA version by running `pip install -i
                https://test.pypi.org/simple/ bitsandbytes-cudaXXX` where XXX is your CUDA version (e.g. 11.6 = 116).
                Make also sure that you have enough GPU RAM to store half of the model size since the 8bit modules are
                not compiled and adapted for CPUs.
2071
2072
2073
            quantization_config (`Dict`, *optional*):
                A dictionary of configuration parameters for the `bitsandbytes` library and loading the model using
                advanced features such as offloading in fp32 on CPU or on disk.
2074
2075
2076
            subfolder (`str`, *optional*, defaults to `""`):
                In case the relevant files are located inside a subfolder of the model repo on huggingface.co, you can
                specify the folder name here.
2077
2078
2079
            variant (`str`, *optional*):
                If specified load weights from `variant` filename, *e.g.* pytorch_model.<variant>.bin. `variant` is
                ignored when using `from_tf` or `from_flax`.
2080

2081
            kwargs (remaining dictionary of keyword arguments, *optional*):
2082
                Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,
2083
                `output_attentions=True`). Behaves differently depending on whether a `config` is provided or
2084
2085
                automatically loaded:

2086
2087
                    - If a configuration is provided with `config`, `**kwargs` will be directly passed to the
                      underlying model's `__init__` method (we assume all relevant updates to the configuration have
2088
                      already been done)
2089
                    - If a configuration is not provided, `kwargs` will be first passed to the configuration class
Sylvain Gugger's avatar
Sylvain Gugger committed
2090
2091
2092
2093
                      initialization function ([`~PretrainedConfig.from_pretrained`]). Each key of `kwargs` that
                      corresponds to a configuration attribute will be used to override said attribute with the
                      supplied `kwargs` value. Remaining keys that do not correspond to any configuration attribute
                      will be passed to the underlying model's `__init__` function.
2094
2095
2096

        <Tip>

Sylvain Gugger's avatar
Sylvain Gugger committed
2097
2098
        Activate the special ["offline-mode"](https://huggingface.co/transformers/installation.html#offline-mode) to
        use this method in a firewalled environment.
2099
2100
2101
2102
2103
2104
2105

        </Tip>

        Examples:

        ```python
        >>> from transformers import BertConfig, BertModel
Sylvain Gugger's avatar
Sylvain Gugger committed
2106

2107
        >>> # Download model and configuration from huggingface.co and cache.
Sylvain Gugger's avatar
Sylvain Gugger committed
2108
        >>> model = BertModel.from_pretrained("bert-base-uncased")
2109
        >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).
Sylvain Gugger's avatar
Sylvain Gugger committed
2110
        >>> model = BertModel.from_pretrained("./test/saved_model/")
2111
        >>> # Update configuration during loading.
Sylvain Gugger's avatar
Sylvain Gugger committed
2112
        >>> model = BertModel.from_pretrained("bert-base-uncased", output_attentions=True)
2113
2114
        >>> assert model.config.output_attentions == True
        >>> # Loading from a TF checkpoint file instead of a PyTorch model (slower, for example purposes, not runnable).
Sylvain Gugger's avatar
Sylvain Gugger committed
2115
2116
        >>> config = BertConfig.from_json_file("./tf_model/my_tf_model_config.json")
        >>> model = BertModel.from_pretrained("./tf_model/my_tf_checkpoint.ckpt.index", from_tf=True, config=config)
2117
        >>> # Loading from a Flax checkpoint file instead of a PyTorch model (slower)
Sylvain Gugger's avatar
Sylvain Gugger committed
2118
        >>> model = BertModel.from_pretrained("bert-base-uncased", from_flax=True)
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
        ```

        * `low_cpu_mem_usage` algorithm:

        This is an experimental function that loads the model using ~1x model size CPU memory

        Here is how it works:

        1. save which state_dict keys we have
        2. drop state_dict before the model is created, since the latter takes 1x model size CPU memory
        3. after the model has been instantiated switch to the meta device all params/buffers that
        are going to be replaced from the loaded state_dict
        4. load state_dict 2nd time
        5. replace the params/buffers from the state_dict

        Currently, it can't handle deepspeed ZeRO stage 3 and ignores loading errors

        """
2137
2138
2139
2140
        config = kwargs.pop("config", None)
        state_dict = kwargs.pop("state_dict", None)
        cache_dir = kwargs.pop("cache_dir", None)
        from_tf = kwargs.pop("from_tf", False)
2141
        from_flax = kwargs.pop("from_flax", False)
2142
        ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
2143
2144
2145
2146
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        output_loading_info = kwargs.pop("output_loading_info", False)
2147
        local_files_only = kwargs.pop("local_files_only", False)
2148
        use_auth_token = kwargs.pop("use_auth_token", None)
Julien Chaumond's avatar
Julien Chaumond committed
2149
        revision = kwargs.pop("revision", None)
2150
        trust_remote_code = kwargs.pop("trust_remote_code", None)
Sylvain Gugger's avatar
Sylvain Gugger committed
2151
        _ = kwargs.pop("mirror", None)
2152
2153
        from_pipeline = kwargs.pop("_from_pipeline", None)
        from_auto_class = kwargs.pop("_from_auto", False)
2154
        _fast_init = kwargs.pop("_fast_init", True)
2155
        torch_dtype = kwargs.pop("torch_dtype", None)
2156
2157
        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", None)
        device_map = kwargs.pop("device_map", None)
2158
        max_memory = kwargs.pop("max_memory", None)
2159
        offload_folder = kwargs.pop("offload_folder", None)
2160
2161
        offload_state_dict = kwargs.pop("offload_state_dict", False)
        load_in_8bit = kwargs.pop("load_in_8bit", False)
2162
        quantization_config = kwargs.pop("quantization_config", None)
2163
        subfolder = kwargs.pop("subfolder", "")
2164
        commit_hash = kwargs.pop("_commit_hash", None)
2165
        variant = kwargs.pop("variant", None)
2166
        use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)
2167

2168
2169
2170
2171
2172
        if trust_remote_code is True:
            logger.warning(
                "The argument `trust_remote_code` is to be used with Auto classes. It has no effect here and is"
                " ignored."
            )
2173
2174
2175
2176
2177
2178
2179
        if device_map is not None:
            if low_cpu_mem_usage is None:
                low_cpu_mem_usage = True
            elif not low_cpu_mem_usage:
                raise ValueError("Passing along a `device_map` requires `low_cpu_mem_usage=True`")

        if low_cpu_mem_usage:
2180
            if device_map is not None:
2181
2182
                # The max memory utils require PyTorch >= 1.10 to have torch.cuda.mem_get_info.
                require_version_core("torch>=1.10")
2183
2184
2185
2186
2187
2188
2189
2190
2191

            if is_deepspeed_zero3_enabled():
                raise ValueError(
                    "DeepSpeed Zero-3 is not compatible with `low_cpu_mem_usage=True` or with passing a `device_map`."
                )
            elif not is_accelerate_available():
                raise ImportError(
                    "Using `low_cpu_mem_usage=True` or a `device_map` requires Accelerate: `pip install accelerate`"
                )
2192

2193
2194
2195
2196
2197
2198
2199
2200
2201
2202
2203
2204
2205
2206
2207
2208
2209
        if quantization_config is None:
            quantization_config, kwargs = BitsAndBytesConfig.from_dict(
                config_dict={"load_in_8bit": load_in_8bit}, return_unused_kwargs=True, **kwargs
            )
        elif quantization_config is not None:
            load_in_8bit = quantization_config.load_in_8bit

            quantization_config_kwargs = {
                k: v for k, v in kwargs.items() if k in inspect.signature(BitsAndBytesConfig).parameters
            }

            if len(quantization_config_kwargs) > 0:
                raise ValueError(
                    "You can't pass `load_in_8bit` or any other `BitsAndBytesConfig` argument as a kwarg when passing "
                    "`quantization_config` argument at the same time."
                )

2210
2211
2212
2213
2214
2215
2216
        if load_in_8bit:
            if not (is_accelerate_available() and is_bitsandbytes_available()):
                raise ImportError(
                    "Using `load_in_8bit=True` requires Accelerate: `pip install accelerate` and the latest version of"
                    " bitsandbytes `pip install -i https://test.pypi.org/simple/ bitsandbytes` or"
                    " pip install bitsandbytes` "
                )
2217
            if torch_dtype != torch.float16:
2218
                # We force the `dtype` to be float16, this is a requirement from `bitsandbytes`
2219
2220
2221
2222
2223
                logger.warning(
                    f"Overriding torch_dtype={torch_dtype} with `torch_dtype=torch.float16` due to "
                    "requirements of `bitsandbytes` to enable model loading in mixed int8. "
                    "Either pass torch_dtype=torch.float16 or don't pass this argument at all to remove this warning."
                )
2224
                torch_dtype = torch.float16
2225

2226
2227
2228
2229
2230
2231
2232
2233
2234
2235
2236
            if device_map is None:
                raise ValueError(
                    "A device map needs to be passed to run convert models into mixed-int8 format. Please run"
                    "`.from_pretrained` with `device_map='auto'`"
                )
            if from_tf or from_flax:
                raise ValueError(
                    "Converting into mixed 8-bit weights from tf/flax weights is currently not supported, please make"
                    " sure the weights are in PyTorch format."
                )

2237
        from_pt = not (from_tf | from_flax)
2238
2239
2240
2241

        user_agent = {"file_type": "model", "framework": "pytorch", "from_auto_class": from_auto_class}
        if from_pipeline is not None:
            user_agent["using_pipeline"] = from_pipeline
thomwolf's avatar
thomwolf committed
2242

2243
2244
2245
2246
        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

2247
2248
2249
        # Load config if we don't provide a configuration
        if not isinstance(config, PretrainedConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
2250
            config, model_kwargs = cls.config_class.from_pretrained(
2251
2252
2253
                config_path,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
2254
                force_download=force_download,
2255
                resume_download=resume_download,
2256
                proxies=proxies,
2257
                local_files_only=local_files_only,
2258
                use_auth_token=use_auth_token,
Julien Chaumond's avatar
Julien Chaumond committed
2259
                revision=revision,
2260
                subfolder=subfolder,
2261
2262
                _from_auto=from_auto_class,
                _from_pipeline=from_pipeline,
2263
                **kwargs,
2264
2265
2266
            )
        else:
            model_kwargs = kwargs
2267

2268
2269
2270
        if commit_hash is None:
            commit_hash = getattr(config, "_commit_hash", None)

Sylvain Gugger's avatar
Sylvain Gugger committed
2271
2272
2273
2274
        # This variable will flag if we're loading a sharded checkpoint. In this case the archive file is just the
        # index of the files.
        is_sharded = False
        sharded_metadata = None
thomwolf's avatar
thomwolf committed
2275
        # Load model
Yih-Dar's avatar
Yih-Dar committed
2276
2277
        loading_info = None

2278
2279
2280
2281
        # Keep in fp32 modules
        keep_in_fp32_modules = None
        use_keep_in_fp32_modules = False

thomwolf's avatar
thomwolf committed
2282
        if pretrained_model_name_or_path is not None:
2283
            pretrained_model_name_or_path = str(pretrained_model_name_or_path)
2284
2285
            is_local = os.path.isdir(pretrained_model_name_or_path)
            if is_local:
2286
2287
2288
                if from_tf and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index")
                ):
2289
                    # Load from a TF 1.0 checkpoint in priority if from_tf
2290
2291
2292
2293
                    archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index")
                elif from_tf and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)
                ):
2294
                    # Load from a TF 2.0 checkpoint in priority if from_tf
2295
2296
2297
2298
                    archive_file = os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)
                elif from_flax and os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
                ):
2299
                    # Load from a Flax checkpoint in priority if from_flax
2300
                    archive_file = os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)
2301
                elif use_safetensors is not False and os.path.isfile(
2302
                    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant))
2303
2304
                ):
                    # Load from a safetensors checkpoint
2305
2306
2307
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_NAME, variant)
                    )
2308
                elif use_safetensors is not False and os.path.isfile(
2309
2310
2311
                    os.path.join(
                        pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)
                    )
2312
2313
                ):
                    # Load from a sharded safetensors checkpoint
2314
2315
2316
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, subfolder, _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)
                    )
2317
                    is_sharded = True
2318
2319
2320
                elif os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant))
                ):
thomwolf's avatar
thomwolf committed
2321
                    # Load from a PyTorch checkpoint
2322
2323
2324
2325
2326
2327
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_NAME, variant)
                    )
                elif os.path.isfile(
                    os.path.join(pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant))
                ):
Sylvain Gugger's avatar
Sylvain Gugger committed
2328
                    # Load from a sharded PyTorch checkpoint
2329
2330
2331
                    archive_file = os.path.join(
                        pretrained_model_name_or_path, subfolder, _add_variant(WEIGHTS_INDEX_NAME, variant)
                    )
Sylvain Gugger's avatar
Sylvain Gugger committed
2332
                    is_sharded = True
2333
2334
                # At this stage we don't have a weight file so we will raise an error.
                elif os.path.isfile(
2335
2336
                    os.path.join(pretrained_model_name_or_path, subfolder, TF_WEIGHTS_NAME + ".index")
                ) or os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, TF2_WEIGHTS_NAME)):
2337
                    raise EnvironmentError(
2338
2339
2340
                        f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory"
                        f" {pretrained_model_name_or_path} but there is a file for TensorFlow weights. Use"
                        " `from_tf=True` to load this model from those weights."
2341
                    )
2342
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, subfolder, FLAX_WEIGHTS_NAME)):
2343
                    raise EnvironmentError(
2344
2345
2346
                        f"Error no file named {_add_variant(WEIGHTS_NAME, variant)} found in directory"
                        f" {pretrained_model_name_or_path} but there is a file for Flax weights. Use `from_flax=True`"
                        " to load this model from those weights."
2347
                    )
thomwolf's avatar
thomwolf committed
2348
                else:
2349
                    raise EnvironmentError(
2350
2351
2352
                        f"Error no file named {_add_variant(WEIGHTS_NAME, variant)}, {TF2_WEIGHTS_NAME},"
                        f" {TF_WEIGHTS_NAME + '.index'} or {FLAX_WEIGHTS_NAME} found in directory"
                        f" {pretrained_model_name_or_path}."
2353
                    )
2354
            elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path)):
2355
                archive_file = pretrained_model_name_or_path
2356
                is_local = True
2357
            elif os.path.isfile(os.path.join(subfolder, pretrained_model_name_or_path + ".index")):
2358
2359
2360
2361
2362
                if not from_tf:
                    raise ValueError(
                        f"We found a TensorFlow checkpoint at {pretrained_model_name_or_path + '.index'}, please set "
                        "from_tf to True to load from this checkpoint."
                    )
2363
                archive_file = os.path.join(subfolder, pretrained_model_name_or_path + ".index")
2364
                is_local = True
2365
            elif is_remote_url(pretrained_model_name_or_path):
2366
                filename = pretrained_model_name_or_path
2367
                resolved_archive_file = download_url(pretrained_model_name_or_path)
2368
            else:
2369
2370
2371
2372
2373
                # set correct filename
                if from_tf:
                    filename = TF2_WEIGHTS_NAME
                elif from_flax:
                    filename = FLAX_WEIGHTS_NAME
2374
                elif use_safetensors is not False:
2375
                    filename = _add_variant(SAFE_WEIGHTS_NAME, variant)
2376
                else:
2377
                    filename = _add_variant(WEIGHTS_NAME, variant)
2378

2379
2380
                try:
                    # Load from URL or cache if already cached
2381
2382
2383
2384
2385
2386
2387
2388
2389
2390
2391
2392
2393
                    cached_file_kwargs = {
                        "cache_dir": cache_dir,
                        "force_download": force_download,
                        "proxies": proxies,
                        "resume_download": resume_download,
                        "local_files_only": local_files_only,
                        "use_auth_token": use_auth_token,
                        "user_agent": user_agent,
                        "revision": revision,
                        "subfolder": subfolder,
                        "_raise_exceptions_for_missing_entries": False,
                        "_commit_hash": commit_hash,
                    }
2394
                    resolved_archive_file = cached_file(pretrained_model_name_or_path, filename, **cached_file_kwargs)
2395

2396
                    # Since we set _raise_exceptions_for_missing_entries=False, we don't get an exception but a None
2397
                    # result when internet is up, the repo and revision exist, but the file does not.
2398
                    if resolved_archive_file is None and filename == _add_variant(SAFE_WEIGHTS_NAME, variant):
2399
2400
                        # Maybe the checkpoint is sharded, we try to grab the index name in this case.
                        resolved_archive_file = cached_file(
2401
2402
2403
                            pretrained_model_name_or_path,
                            _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant),
                            **cached_file_kwargs,
2404
2405
2406
                        )
                        if resolved_archive_file is not None:
                            is_sharded = True
2407
2408
2409
2410
                        elif use_safetensors:
                            raise EnvironmentError(
                                f" {_add_variant(SAFE_WEIGHTS_NAME, variant)} or {_add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)} and thus cannot be loaded with `safetensors`. Please make sure that the model has been saved with `safe_serialization=True` or do not set `use_safetensors=True`."
                            )
2411
2412
                        else:
                            # This repo has no safetensors file of any kind, we switch to PyTorch.
2413
                            filename = _add_variant(WEIGHTS_NAME, variant)
2414
                            resolved_archive_file = cached_file(
2415
                                pretrained_model_name_or_path, filename, **cached_file_kwargs
2416
                            )
2417
                    if resolved_archive_file is None and filename == _add_variant(WEIGHTS_NAME, variant):
Sylvain Gugger's avatar
Sylvain Gugger committed
2418
                        # Maybe the checkpoint is sharded, we try to grab the index name in this case.
2419
                        resolved_archive_file = cached_file(
2420
2421
2422
                            pretrained_model_name_or_path,
                            _add_variant(WEIGHTS_INDEX_NAME, variant),
                            **cached_file_kwargs,
2423
                        )
2424
2425
2426
                        if resolved_archive_file is not None:
                            is_sharded = True
                    if resolved_archive_file is None:
Sylvain Gugger's avatar
Sylvain Gugger committed
2427
2428
2429
2430
2431
2432
2433
2434
2435
                        # Otherwise, maybe there is a TF or Flax model file.  We try those to give a helpful error
                        # message.
                        has_file_kwargs = {
                            "revision": revision,
                            "proxies": proxies,
                            "use_auth_token": use_auth_token,
                        }
                        if has_file(pretrained_model_name_or_path, TF2_WEIGHTS_NAME, **has_file_kwargs):
                            raise EnvironmentError(
Sylvain Gugger's avatar
Sylvain Gugger committed
2436
                                f"{pretrained_model_name_or_path} does not appear to have a file named"
2437
2438
                                f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for TensorFlow weights."
                                " Use `from_tf=True` to load this model from those weights."
Sylvain Gugger's avatar
Sylvain Gugger committed
2439
2440
2441
                            )
                        elif has_file(pretrained_model_name_or_path, FLAX_WEIGHTS_NAME, **has_file_kwargs):
                            raise EnvironmentError(
Sylvain Gugger's avatar
Sylvain Gugger committed
2442
                                f"{pretrained_model_name_or_path} does not appear to have a file named"
2443
2444
2445
2446
2447
2448
2449
2450
2451
2452
                                f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file for Flax weights. Use"
                                " `from_flax=True` to load this model from those weights."
                            )
                        elif variant is not None and has_file(
                            pretrained_model_name_or_path, WEIGHTS_NAME, **has_file_kwargs
                        ):
                            raise EnvironmentError(
                                f"{pretrained_model_name_or_path} does not appear to have a file named"
                                f" {_add_variant(WEIGHTS_NAME, variant)} but there is a file without the variant"
                                f" {variant}. Use `variant=None` to load this model from those weights."
Sylvain Gugger's avatar
Sylvain Gugger committed
2453
2454
2455
                            )
                        else:
                            raise EnvironmentError(
2456
2457
2458
                                f"{pretrained_model_name_or_path} does not appear to have a file named"
                                f" {_add_variant(WEIGHTS_NAME, variant)}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or"
                                f" {FLAX_WEIGHTS_NAME}."
Sylvain Gugger's avatar
Sylvain Gugger committed
2459
                            )
2460
2461
2462
2463
2464
2465
                except EnvironmentError:
                    # Raise any environment error raise by `cached_file`. It will have a helpful error message adapted
                    # to the original exception.
                    raise
                except Exception:
                    # For any other exception, we throw a generic error.
2466
                    raise EnvironmentError(
2467
2468
2469
                        f"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it"
                        " from 'https://huggingface.co/models', make sure you don't have a local directory with the"
                        f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
2470
2471
                        f" directory containing a file named {_add_variant(WEIGHTS_NAME, variant)},"
                        f" {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME} or {FLAX_WEIGHTS_NAME}."
2472
                    )
2473

2474
            if is_local:
2475
                logger.info(f"loading weights file {archive_file}")
2476
                resolved_archive_file = archive_file
2477
            else:
2478
                logger.info(f"loading weights file {filename} from cache at {resolved_archive_file}")
2479
        else:
thomwolf's avatar
thomwolf committed
2480
            resolved_archive_file = None
2481

Sylvain Gugger's avatar
Sylvain Gugger committed
2482
2483
        # We'll need to download and cache each checkpoint shard if the checkpoint is sharded.
        if is_sharded:
2484
            # rsolved_archive_file becomes a list of files that point to the different checkpoint shards in this case.
Sylvain Gugger's avatar
Sylvain Gugger committed
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
            resolved_archive_file, sharded_metadata = get_checkpoint_shard_files(
                pretrained_model_name_or_path,
                resolved_archive_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                user_agent=user_agent,
                revision=revision,
2496
                subfolder=subfolder,
2497
                _commit_hash=commit_hash,
Sylvain Gugger's avatar
Sylvain Gugger committed
2498
2499
            )

2500
2501
        # load pt weights early so that we know which dtype to init the model under
        if from_pt:
2502
            if not is_sharded and state_dict is None:
Sylvain Gugger's avatar
Sylvain Gugger committed
2503
2504
                # Time to load the checkpoint
                state_dict = load_state_dict(resolved_archive_file)
2505

2506
2507
2508
            # set dtype to instantiate the model under:
            # 1. If torch_dtype is not None, we use that dtype
            # 2. If torch_dtype is "auto", we auto-detect dtype from the loaded state_dict, by checking its first
2509
            #    weights entry that is of a floating type - we assume all floating dtype weights are of the same dtype
2510
2511
            # we also may have config.torch_dtype available, but we won't rely on it till v5
            dtype_orig = None
2512

2513
2514
2515
            if torch_dtype is not None:
                if isinstance(torch_dtype, str):
                    if torch_dtype == "auto":
2516
2517
2518
                        if hasattr(config, "torch_dtype") and config.torch_dtype is not None:
                            torch_dtype = config.torch_dtype
                            logger.info(f"Will use torch_dtype={torch_dtype} as defined in model's config object")
Sylvain Gugger's avatar
Sylvain Gugger committed
2519
                        else:
2520
2521
2522
2523
2524
2525
2526
2527
2528
2529
2530
2531
                            if is_sharded and "dtype" in sharded_metadata:
                                torch_dtype = sharded_metadata["dtype"]
                            elif not is_sharded:
                                torch_dtype = get_state_dict_dtype(state_dict)
                            else:
                                one_state_dict = load_state_dict(resolved_archive_file[0])
                                torch_dtype = get_state_dict_dtype(one_state_dict)
                                del one_state_dict  # free CPU memory
                            logger.info(
                                "Since the `torch_dtype` attribute can't be found in model's config object, "
                                "will use torch_dtype={torch_dtype} as derived from model's weights"
                            )
2532
2533
                    else:
                        raise ValueError(
2534
                            f'`torch_dtype` can be either `torch.dtype` or `"auto"`, but received {torch_dtype}'
2535
2536
2537
                        )
                dtype_orig = cls._set_default_torch_dtype(torch_dtype)

2538
2539
2540
2541
2542
2543
2544
2545
2546
2547
2548
2549
2550
2551
            # Check if `_keep_in_fp32_modules` is not None
            use_keep_in_fp32_modules = (
                (cls._keep_in_fp32_modules is not None) and is_accelerate_available() and torch_dtype == torch.float16
            )
            if (
                (cls._keep_in_fp32_modules is not None)
                and not is_accelerate_available()
                and torch_dtype == torch.float16
            ):
                logger.warning(
                    "For stability purposes, it is recommended to have accelerate installed when using this model in"
                    " torch.float16, please install it with `pip install accelerate`"
                )

2552
2553
2554
            if is_sharded:
                loaded_state_dict_keys = sharded_metadata["all_checkpoint_keys"]
            else:
2555
                loaded_state_dict_keys = list(state_dict.keys())
2556
            if low_cpu_mem_usage or use_keep_in_fp32_modules:
2557
                state_dict = None
2558

2559
2560
        config.name_or_path = pretrained_model_name_or_path

2561
        # Instantiate model.
2562
2563
        init_contexts = [no_init_weights(_enable=_fast_init)]

2564
2565
2566
2567
        if is_deepspeed_zero3_enabled():
            import deepspeed

            logger.info("Detected DeepSpeed ZeRO-3: activating zero.init() for this model")
2568
            init_contexts = [deepspeed.zero.Init(config_dict_or_path=deepspeed_config())] + init_contexts
2569
        elif load_in_8bit or low_cpu_mem_usage:
2570
2571
2572
2573
2574
            init_contexts.append(init_empty_weights())

        with ContextManagers(init_contexts):
            model = cls(config, *model_args, **model_kwargs)

2575
2576
2577
2578
2579
2580
2581
        # Check first if we are `from_pt`
        if use_keep_in_fp32_modules:
            low_cpu_mem_usage = True
            keep_in_fp32_modules = model._keep_in_fp32_modules
        else:
            keep_in_fp32_modules = []

2582
        if load_in_8bit:
2583
            from .utils.bitsandbytes import get_keys_to_not_convert, replace_8bit_linear
2584

2585
2586
2587
2588
            load_in_8bit_skip_modules = quantization_config.llm_int8_skip_modules
            load_in_8bit_threshold = quantization_config.llm_int8_threshold
            load_in_8bit_fp32_cpu_offload = quantization_config.llm_int8_enable_fp32_cpu_offload

2589
2590
            logger.info("Detected 8-bit loading: activating 8-bit loading for this model")

2591
2592
2593
2594
2595
            # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
            if load_in_8bit_skip_modules is None:
                modules_to_not_convert = get_keys_to_not_convert(model)
            else:
                modules_to_not_convert = load_in_8bit_skip_modules
2596
2597
2598
2599
2600
2601

            if not isinstance(modules_to_not_convert, list):
                modules_to_not_convert = [modules_to_not_convert]

            modules_to_not_convert.extend(keep_in_fp32_modules)

2602
2603
2604
2605
2606
2607
2608
            # Extend the modules to not convert to keys that are supposed to be offloaded to `cpu` or `disk`
            if isinstance(device_map, dict) and len(device_map.keys()) > 1:
                keys_on_cpu = [key for key, value in device_map.items() if value in ["disk", "cpu"]]

                if len(keys_on_cpu) > 0 and not load_in_8bit_fp32_cpu_offload:
                    raise ValueError(
                        "If you want to offload some keys to `cpu` or `disk`, you need to set "
Younes Belkada's avatar
Younes Belkada committed
2609
                        "`llm_int8_enable_fp32_cpu_offload=True`. Note that these modules will not be "
2610
2611
2612
2613
2614
                        " converted to 8-bit but kept in 32-bit."
                    )

                modules_to_not_convert.extend(keys_on_cpu)

2615
2616
2617
            model = replace_8bit_linear(
                model, threshold=load_in_8bit_threshold, modules_to_not_convert=modules_to_not_convert
            )
2618

2619
2620
2621
2622
2623
            # training in 8-bit is only available in 0.37.0+
            model._is_int8_training_enabled = version.parse(
                importlib_metadata.version("bitsandbytes")
            ) >= version.parse("0.37.0")

2624
        if isinstance(device_map, str):
2625
2626
2627
2628
2629
2630
2631
2632
2633
2634
2635
2636
2637
2638
2639
2640
2641
2642
            special_dtypes = {}
            if load_in_8bit:
                special_dtypes.update(
                    {
                        name: torch_dtype
                        for name, _ in model.named_parameters()
                        if any(m in name for m in modules_to_not_convert)
                    }
                )

            special_dtypes.update(
                {
                    name: torch.float32
                    for name, _ in model.named_parameters()
                    if any(m in name for m in keep_in_fp32_modules)
                }
            )

2643
            if model._no_split_modules is None:
2644
                raise ValueError(f"{model.__class__.__name__} does not support `device_map='{device_map}'` yet.")
2645
            no_split_modules = model._no_split_modules
2646
2647
2648
2649
2650
2651
2652
            if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
                raise ValueError(
                    "If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
                    "'sequential'."
                )
            elif device_map in ["balanced", "balanced_low_0"] and get_balanced_memory is None:
                raise ValueError(f"`device_map={device_map}` requires a source install of Accelerate.")
2653

2654
            kwargs = {"no_split_module_classes": no_split_modules}
2655
2656
2657
2658
2659
2660
2661
            if "special_dtypes" in inspect.signature(infer_auto_device_map).parameters:
                kwargs["special_dtypes"] = special_dtypes
            elif len(special_dtypes) > 0:
                logger.warn(
                    "This model has some weights that should be kept in higher precision, you need to upgrade "
                    "`accelerate` to properly deal with them (`pip install --upgrade accelerate`)."
                )
2662
2663
2664
            if device_map != "sequential" and get_balanced_memory is not None:
                max_memory = get_balanced_memory(
                    model,
2665
                    dtype=torch_dtype if not load_in_8bit else torch.int8,
2666
                    low_zero=(device_map == "balanced_low_0"),
2667
                    max_memory=max_memory,
2668
                    **kwargs,
2669
                )
2670
            kwargs["max_memory"] = max_memory
2671
2672
            # Make sure tied weights are tied before creating the device map.
            model.tie_weights()
2673
            device_map = infer_auto_device_map(model, dtype=torch_dtype if not load_in_8bit else torch.int8, **kwargs)
2674

2675
            if load_in_8bit:
2676
                # The LM head / tied weights or any last module can stay on disk / CPU
2677
                device_map_without_lm_head = {
2678
                    key: device_map[key] for key in device_map.keys() if key not in modules_to_not_convert
2679
2680
                }
                if "cpu" in device_map_without_lm_head.values() or "disk" in device_map_without_lm_head.values():
2681
2682
2683
                    raise ValueError(
                        """
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
2684
2685
2686
2687
2688
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in 32-bit, you need to set `load_in_8bit_fp32_cpu_offload=True` and pass a custom
                        `device_map` to `from_pretrained`. Check
                        https://huggingface.co/docs/transformers/main/en/main_classes/quantization#offload-between-cpu-and-gpu
                        for more details.
2689
2690
                        """
                    )
2691
2692
                del device_map_without_lm_head

2693
        if from_tf:
2694
            if resolved_archive_file.endswith(".index"):
2695
2696
2697
2698
2699
                # Load from a TensorFlow 1.X checkpoint - provided by original authors
                model = cls.load_tf_weights(model, config, resolved_archive_file[:-6])  # Remove the '.index'
            else:
                # Load from our TensorFlow 2.0 checkpoints
                try:
2700
                    from .modeling_tf_pytorch_utils import load_tf2_checkpoint_in_pytorch_model
2701

Yih-Dar's avatar
Yih-Dar committed
2702
2703
2704
                    model, loading_info = load_tf2_checkpoint_in_pytorch_model(
                        model, resolved_archive_file, allow_missing_keys=True, output_loading_info=True
                    )
2705
                except ImportError:
2706
                    logger.error(
Sylvain Gugger's avatar
Sylvain Gugger committed
2707
2708
2709
                        "Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed."
                        " Please see https://pytorch.org/ and https://www.tensorflow.org/install/ for installation"
                        " instructions."
2710
                    )
2711
                    raise
2712
2713
2714
2715
2716
2717
2718
        elif from_flax:
            try:
                from .modeling_flax_pytorch_utils import load_flax_checkpoint_in_pytorch_model

                model = load_flax_checkpoint_in_pytorch_model(model, resolved_archive_file)
            except ImportError:
                logger.error(
Sylvain Gugger's avatar
Sylvain Gugger committed
2719
2720
2721
                    "Loading a Flax model in PyTorch, requires both PyTorch and Flax to be installed. Please see"
                    " https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for"
                    " installation instructions."
2722
2723
                )
                raise
2724
        elif from_pt:
2725
2726
2727
2728
            # restore default dtype
            if dtype_orig is not None:
                torch.set_default_dtype(dtype_orig)

Sylvain Gugger's avatar
Sylvain Gugger committed
2729
2730
2731
2732
2733
2734
2735
2736
            (
                model,
                missing_keys,
                unexpected_keys,
                mismatched_keys,
                offload_index,
                error_msgs,
            ) = cls._load_pretrained_model(
2737
2738
2739
2740
2741
2742
2743
2744
2745
                model,
                state_dict,
                loaded_state_dict_keys,  # XXX: rename?
                resolved_archive_file,
                pretrained_model_name_or_path,
                ignore_mismatched_sizes=ignore_mismatched_sizes,
                sharded_metadata=sharded_metadata,
                _fast_init=_fast_init,
                low_cpu_mem_usage=low_cpu_mem_usage,
2746
2747
2748
2749
                device_map=device_map,
                offload_folder=offload_folder,
                offload_state_dict=offload_state_dict,
                dtype=torch_dtype,
2750
                load_in_8bit=load_in_8bit,
2751
                keep_in_fp32_modules=keep_in_fp32_modules,
2752
            )
2753

Younes Belkada's avatar
Younes Belkada committed
2754
        model.is_loaded_in_8bit = load_in_8bit
2755

2756
2757
        # make sure token embedding weights are still tied if needed
        model.tie_weights()
2758

2759
        # Set model in evaluation mode to deactivate DropOut modules by default
2760
2761
        model.eval()

2762
2763
2764
2765
2766
2767
2768
2769
2770
2771
2772
2773
2774
2775
2776
2777
2778
        # If it is a model with generation capabilities, attempt to load the generation config
        if model.can_generate():
            try:
                model.generation_config = GenerationConfig.from_pretrained(
                    pretrained_model_name_or_path,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    subfolder=subfolder,
                    _from_auto=from_auto_class,
                    _from_pipeline=from_pipeline,
                    **kwargs,
                )
2779
            except (OSError, TypeError):
2780
2781
2782
2783
2784
                logger.info(
                    "Generation config file not found, using a generation config created from the model config."
                )
                pass

2785
2786
        # Dispatch model with hooks on all devices if necessary
        if device_map is not None:
Sylvain Gugger's avatar
Sylvain Gugger committed
2787
            dispatch_model(model, device_map=device_map, offload_dir=offload_folder, offload_index=offload_index)
2788

thomwolf's avatar
thomwolf committed
2789
        if output_loading_info:
Yih-Dar's avatar
Yih-Dar committed
2790
2791
2792
2793
2794
2795
2796
            if loading_info is None:
                loading_info = {
                    "missing_keys": missing_keys,
                    "unexpected_keys": unexpected_keys,
                    "mismatched_keys": mismatched_keys,
                    "error_msgs": error_msgs,
                }
thomwolf's avatar
thomwolf committed
2797
2798
            return model, loading_info

2799
2800
        return model

2801
    @classmethod
Sylvain Gugger's avatar
Sylvain Gugger committed
2802
2803
2804
2805
    def _load_pretrained_model(
        cls,
        model,
        state_dict,
2806
        loaded_keys,
Sylvain Gugger's avatar
Sylvain Gugger committed
2807
2808
2809
2810
2811
        resolved_archive_file,
        pretrained_model_name_or_path,
        ignore_mismatched_sizes=False,
        sharded_metadata=None,
        _fast_init=True,
2812
        low_cpu_mem_usage=False,
2813
2814
        device_map=None,
        offload_folder=None,
2815
        offload_state_dict=None,
2816
        dtype=None,
2817
        load_in_8bit=False,
2818
        keep_in_fp32_modules=None,
2819
    ):
Sylvain Gugger's avatar
Sylvain Gugger committed
2820
        is_safetensors = False
2821
2822
2823
        if load_in_8bit:
            from .utils.bitsandbytes import set_module_8bit_tensor_to_device

Sylvain Gugger's avatar
Sylvain Gugger committed
2824
        if device_map is not None and "disk" in device_map.values():
Sylvain Gugger's avatar
Sylvain Gugger committed
2825
2826
2827
2828
2829
            archive_file = (
                resolved_archive_file[0] if isinstance(resolved_archive_file, (list, tuple)) else resolved_archive_file
            )
            is_safetensors = archive_file.endswith(".safetensors")
            if offload_folder is None and not is_safetensors:
Sylvain Gugger's avatar
Sylvain Gugger committed
2830
2831
                raise ValueError(
                    "The current `device_map` had weights offloaded to the disk. Please provide an `offload_folder`"
Sylvain Gugger's avatar
Sylvain Gugger committed
2832
2833
                    " for them. Alternatively, make sure you have `safetensors` installed if the model you are using"
                    " offers the weights in this format."
Sylvain Gugger's avatar
Sylvain Gugger committed
2834
                )
Sylvain Gugger's avatar
Sylvain Gugger committed
2835
2836
            if offload_folder is not None:
                os.makedirs(offload_folder, exist_ok=True)
2837
2838
2839
            if offload_state_dict is None:
                offload_state_dict = True

2840
        is_sharded_safetensors = is_safetensors and sharded_metadata is not None
2841
        # Retrieve missing & unexpected_keys
2842
2843
        model_state_dict = model.state_dict()
        expected_keys = list(model_state_dict.keys())
2844
2845
        prefix = model.base_model_prefix

Sylvain Gugger's avatar
Sylvain Gugger committed
2846
2847
2848
2849
2850
2851
2852
        def _fix_key(key):
            if "beta" in key:
                return key.replace("beta", "bias")
            if "gamma" in key:
                return key.replace("gamma", "weight")
            return key

2853
        original_loaded_keys = loaded_keys
Sylvain Gugger's avatar
Sylvain Gugger committed
2854
2855
        loaded_keys = [_fix_key(key) for key in loaded_keys]

2856
2857
2858
2859
2860
2861
        if len(prefix) > 0:
            has_prefix_module = any(s.startswith(prefix) for s in loaded_keys)
            expects_prefix_module = any(s.startswith(prefix) for s in expected_keys)
        else:
            has_prefix_module = False
            expects_prefix_module = False
Patrick von Platen's avatar
Patrick von Platen committed
2862
2863
2864

        # key re-naming operations are never done on the keys
        # that are loaded, but always on the keys of the newly initialized model
2865
2866
        remove_prefix_from_model = not has_prefix_module and expects_prefix_module
        add_prefix_to_model = has_prefix_module and not expects_prefix_module
2867

2868
        if remove_prefix_from_model:
2869
2870
2871
            _prefix = f"{prefix}."
            expected_keys_not_prefixed = [s for s in expected_keys if not s.startswith(_prefix)]
            expected_keys = [s[len(_prefix) :] if s.startswith(_prefix) else s for s in expected_keys]
2872
        elif add_prefix_to_model:
2873
2874
2875
2876
2877
            expected_keys = [".".join([prefix, s]) for s in expected_keys]

        missing_keys = list(set(expected_keys) - set(loaded_keys))
        unexpected_keys = list(set(loaded_keys) - set(expected_keys))

2878
2879
2880
2881
2882
        # Some tensors maybe have been already filled by another key (tied weights).
        existing_ptrs = {model_state_dict[k].data_ptr() for k in loaded_keys if k in model_state_dict}
        missing_keys = [
            k for k in missing_keys if k in model_state_dict and model_state_dict[k].data_ptr() not in existing_ptrs
        ]
2883
2884
2885
2886
2887
2888
2889
2890
2891
2892
        # Some models may have keys that are not in the state by design, removing them before needlessly warning
        # the user.
        if cls._keys_to_ignore_on_load_missing is not None:
            for pat in cls._keys_to_ignore_on_load_missing:
                missing_keys = [k for k in missing_keys if re.search(pat, k) is None]

        if cls._keys_to_ignore_on_load_unexpected is not None:
            for pat in cls._keys_to_ignore_on_load_unexpected:
                unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]

2893
2894
2895
2896
        # retrieve weights on meta device and put them back on CPU.
        # This is not ideal in terms of memory, but if we don't do that not, we can't initialize them in the next step
        if low_cpu_mem_usage:
            for key in missing_keys:
Susnato Dhar's avatar
Susnato Dhar committed
2897
2898
2899
2900
2901
                if key in list(model_state_dict.keys()):
                    key = key
                elif f"{prefix}.key" in list(model_state_dict.keys()):
                    key = f"{prefix}.key"
                elif key.startswith(prefix) and ".".join(key.split(".")[1:]) in list(model_state_dict.keys()):
2902
2903
                    key = ".".join(key.split(".")[1:])
                param = model_state_dict[key]
2904
2905
2906
2907
2908
2909
2910
2911
2912
2913

                # upcast in fp32 if any
                target_dtype = dtype
                if (
                    keep_in_fp32_modules is not None
                    and dtype == torch.float16
                    and any(module_to_keep_in_fp32 in key for module_to_keep_in_fp32 in keep_in_fp32_modules)
                ):
                    target_dtype = torch.float32

2914
                if param.device == torch.device("meta"):
2915
                    if not load_in_8bit:
2916
                        set_module_tensor_to_device(model, key, "cpu", torch.empty(*param.size(), dtype=target_dtype))
2917
                    else:
2918
2919
2920
                        set_module_8bit_tensor_to_device(
                            model, key, "cpu", torch.empty(*param.size(), dtype=target_dtype)
                        )
2921
2922

        # retrieve unintialized modules and initialize before maybe overriding that with the pretrained weights.
2923
        if _fast_init:
2924
2925
2926
2927
2928
2929
2930
2931
2932
            if remove_prefix_from_model:
                _loaded_keys = [f"{prefix}.{k}" for k in loaded_keys]
            elif add_prefix_to_model:
                _loaded_keys = [k[len(prefix) + 1 :] for k in loaded_keys]
            else:
                _loaded_keys = loaded_keys
            set_initialized_submodules(model, _loaded_keys)
            # This will only initialize submodules that are not marked as initialized by the line above.
            model.apply(model._initialize_weights)
2933

2934
2935
2936
2937
2938
2939
        # Set some modules to fp32 if any
        if keep_in_fp32_modules is not None:
            for name, param in model.named_parameters():
                if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                    param = param.to(torch.float32)

2940
2941
2942
        # Make sure we are able to load base models as well as derived models (with heads)
        start_prefix = ""
        model_to_load = model
2943
        if len(cls.base_model_prefix) > 0 and not hasattr(model, cls.base_model_prefix) and has_prefix_module:
2944
            start_prefix = cls.base_model_prefix + "."
2945
        if len(cls.base_model_prefix) > 0 and hasattr(model, cls.base_model_prefix) and not has_prefix_module:
2946
            model_to_load = getattr(model, cls.base_model_prefix)
Sylvain Gugger's avatar
Sylvain Gugger committed
2947
2948
            base_model_expected_keys = list(model_to_load.state_dict().keys())
            if any(key in expected_keys_not_prefixed and key not in base_model_expected_keys for key in loaded_keys):
2949
                raise ValueError(
2950
                    "The state dictionary of the model you are trying to load is corrupted. Are you sure it was "
2951
2952
                    "properly saved?"
                )
2953
2954
            if device_map is not None:
                device_map = {k.replace(f"{cls.base_model_prefix}.", ""): v for k, v in device_map.items()}
2955

2956
2957
2958
2959
2960
2961
2962
2963
        def _find_mismatched_keys(
            state_dict,
            model_state_dict,
            loaded_keys,
            add_prefix_to_model,
            remove_prefix_from_model,
            ignore_mismatched_sizes,
        ):
Sylvain Gugger's avatar
Sylvain Gugger committed
2964
2965
2966
2967
2968
2969
2970
2971
2972
2973
2974
2975
2976
2977
2978
2979
2980
2981
2982
            mismatched_keys = []
            if ignore_mismatched_sizes:
                for checkpoint_key in loaded_keys:
                    model_key = checkpoint_key
                    if remove_prefix_from_model:
                        # The model key starts with `prefix` but `checkpoint_key` doesn't so we add it.
                        model_key = f"{prefix}.{checkpoint_key}"
                    elif add_prefix_to_model:
                        # The model key doesn't start with `prefix` but `checkpoint_key` does so we remove it.
                        model_key = ".".join(checkpoint_key.split(".")[1:])

                    if (
                        model_key in model_state_dict
                        and state_dict[checkpoint_key].shape != model_state_dict[model_key].shape
                    ):
                        mismatched_keys.append(
                            (checkpoint_key, state_dict[checkpoint_key].shape, model_state_dict[model_key].shape)
                        )
                        del state_dict[checkpoint_key]
2983
2984
            return mismatched_keys

2985
2986
2987
2988
        if resolved_archive_file is not None:
            folder = os.path.sep.join(resolved_archive_file[0].split(os.path.sep)[:-1])
        else:
            folder = None
Sylvain Gugger's avatar
Sylvain Gugger committed
2989
        if device_map is not None and is_safetensors:
2990
2991
2992
2993
2994
2995
2996
2997
2998
2999
3000
3001
            param_device_map = expand_device_map(device_map, original_loaded_keys)

            str_dtype = str(dtype).replace("torch.", "") if dtype is not None else "float32"
            if sharded_metadata is None:
                archive_file = (
                    resolved_archive_file[0]
                    if isinstance(resolved_archive_file, (list, tuple))
                    else resolved_archive_file
                )
                weight_map = {p: archive_file for p in original_loaded_keys}
            else:
                weight_map = {p: os.path.join(folder, f) for p, f in sharded_metadata["weight_map"].items()}
Sylvain Gugger's avatar
Sylvain Gugger committed
3002
            offload_index = {
3003
3004
                p: {"safetensors_file": f, "weight_name": p, "dtype": str_dtype}
                for p, f in weight_map.items()
Sylvain Gugger's avatar
Sylvain Gugger committed
3005
3006
3007
                if param_device_map[p] == "disk"
            }

3008
3009
3010
3011
3012
        if state_dict is not None:
            # Whole checkpoint
            mismatched_keys = _find_mismatched_keys(
                state_dict,
                model_state_dict,
3013
                original_loaded_keys,
3014
3015
3016
3017
                add_prefix_to_model,
                remove_prefix_from_model,
                ignore_mismatched_sizes,
            )
Sylvain Gugger's avatar
Sylvain Gugger committed
3018
            error_msgs = _load_state_dict_into_model(model_to_load, state_dict, start_prefix)
Sylvain Gugger's avatar
Sylvain Gugger committed
3019
            offload_index = None
Sylvain Gugger's avatar
Sylvain Gugger committed
3020
        else:
3021
3022
            # Sharded checkpoint or whole but low_cpu_mem_usage==True

Sylvain Gugger's avatar
Sylvain Gugger committed
3023
3024
3025
3026
3027
            # This should always be a list but, just to be sure.
            if not isinstance(resolved_archive_file, list):
                resolved_archive_file = [resolved_archive_file]

            error_msgs = []
3028
            mismatched_keys = []
Sylvain Gugger's avatar
Sylvain Gugger committed
3029
3030
            if not is_safetensors:
                offload_index = {} if device_map is not None and "disk" in device_map.values() else None
3031
3032
3033
3034
3035
3036
3037
            if offload_state_dict:
                state_dict_folder = tempfile.mkdtemp()
                state_dict_index = {}
            else:
                state_dict_folder = None
                state_dict_index = None

3038
            if is_sharded_safetensors:
Sylvain Gugger's avatar
Sylvain Gugger committed
3039
3040
3041
3042
3043
                disk_only_shard_files = get_disk_only_shard_files(device_map, sharded_metadata=sharded_metadata)
                disk_only_shard_files = [os.path.join(folder, f) for f in disk_only_shard_files]
            else:
                disk_only_shard_files = []

3044
3045
            if len(resolved_archive_file) > 1:
                resolved_archive_file = logging.tqdm(resolved_archive_file, desc="Loading checkpoint shards")
Sylvain Gugger's avatar
Sylvain Gugger committed
3046
            for shard_file in resolved_archive_file:
Sylvain Gugger's avatar
Sylvain Gugger committed
3047
3048
3049
                # Skip the load for shards that only contain disk-offloaded weights when using safetensors for the offload.
                if shard_file in disk_only_shard_files:
                    continue
Sylvain Gugger's avatar
Sylvain Gugger committed
3050
                state_dict = load_state_dict(shard_file)
3051

Sylvain Gugger's avatar
Sylvain Gugger committed
3052
3053
                # Mistmatched keys contains tuples key/shape1/shape2 of weights in the checkpoint that have a shape not
                # matching the weights in the model.
3054
3055
3056
                mismatched_keys += _find_mismatched_keys(
                    state_dict,
                    model_state_dict,
3057
                    original_loaded_keys,
3058
3059
3060
3061
                    add_prefix_to_model,
                    remove_prefix_from_model,
                    ignore_mismatched_sizes,
                )
3062
3063

                if low_cpu_mem_usage:
3064
3065
3066
3067
3068
3069
3070
3071
3072
3073
3074
3075
                    new_error_msgs, offload_index, state_dict_index = _load_state_dict_into_meta_model(
                        model_to_load,
                        state_dict,
                        loaded_keys,
                        start_prefix,
                        expected_keys,
                        device_map=device_map,
                        offload_folder=offload_folder,
                        offload_index=offload_index,
                        state_dict_folder=state_dict_folder,
                        state_dict_index=state_dict_index,
                        dtype=dtype,
3076
                        load_in_8bit=load_in_8bit,
Sylvain Gugger's avatar
Sylvain Gugger committed
3077
                        is_safetensors=is_safetensors,
3078
                        keep_in_fp32_modules=keep_in_fp32_modules,
3079
                    )
3080
                    error_msgs += new_error_msgs
3081
3082
                else:
                    error_msgs += _load_state_dict_into_model(model_to_load, state_dict, start_prefix)
3083

3084
3085
3086
3087
                # force memory release
                del state_dict
                gc.collect()

3088
            if offload_index is not None and len(offload_index) > 0:
Sylvain Gugger's avatar
Sylvain Gugger committed
3089
3090
3091
                if model != model_to_load:
                    # We need to add the prefix of the base model
                    prefix = cls.base_model_prefix
Sylvain Gugger's avatar
Sylvain Gugger committed
3092
3093
3094
3095
3096
3097
                    if not is_safetensors:
                        for weight_name in offload_index:
                            shutil.move(
                                os.path.join(offload_folder, f"{weight_name}.dat"),
                                os.path.join(offload_folder, f"{prefix}.{weight_name}.dat"),
                            )
Sylvain Gugger's avatar
Sylvain Gugger committed
3098
                    offload_index = {f"{prefix}.{key}": value for key, value in offload_index.items()}
Sylvain Gugger's avatar
Sylvain Gugger committed
3099
3100
3101
                if not is_safetensors:
                    save_offload_index(offload_index, offload_folder)
                    offload_index = None
3102
3103
3104

            if offload_state_dict:
                # Load back temporarily offloaded state dict
3105
                load_offloaded_weights(model_to_load, state_dict_index, state_dict_folder)
3106
3107
                shutil.rmtree(state_dict_folder)

3108
3109
        if len(error_msgs) > 0:
            error_msg = "\n\t".join(error_msgs)
3110
3111
3112
3113
            if "size mismatch" in error_msg:
                error_msg += (
                    "\n\tYou may consider adding `ignore_mismatched_sizes=True` in the model `from_pretrained` method."
                )
3114
3115
            raise RuntimeError(f"Error(s) in loading state_dict for {model.__class__.__name__}:\n\t{error_msg}")

3116
3117
        if len(unexpected_keys) > 0:
            logger.warning(
Sylvain Gugger's avatar
Sylvain Gugger committed
3118
3119
3120
3121
3122
3123
3124
                f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when"
                f" initializing {model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are"
                f" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or"
                " with another architecture (e.g. initializing a BertForSequenceClassification model from a"
                " BertForPreTraining model).\n- This IS NOT expected if you are initializing"
                f" {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical"
                " (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
3125
3126
3127
3128
3129
            )
        else:
            logger.info(f"All model checkpoint weights were used when initializing {model.__class__.__name__}.\n")
        if len(missing_keys) > 0:
            logger.warning(
Sylvain Gugger's avatar
Sylvain Gugger committed
3130
3131
3132
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
                f" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\nYou should probably"
                " TRAIN this model on a down-stream task to be able to use it for predictions and inference."
3133
            )
3134
        elif len(mismatched_keys) == 0:
3135
            logger.info(
Sylvain Gugger's avatar
Sylvain Gugger committed
3136
3137
3138
3139
                f"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at"
                f" {pretrained_model_name_or_path}.\nIf your task is similar to the task the model of the checkpoint"
                f" was trained on, you can already use {model.__class__.__name__} for predictions without further"
                " training."
3140
            )
3141
3142
3143
3144
3145
3146
3147
3148
        if len(mismatched_keys) > 0:
            mismatched_warning = "\n".join(
                [
                    f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
                    for key, shape1, shape2 in mismatched_keys
                ]
            )
            logger.warning(
Sylvain Gugger's avatar
Sylvain Gugger committed
3149
3150
3151
3152
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at"
                f" {pretrained_model_name_or_path} and are newly initialized because the shapes did not"
                f" match:\n{mismatched_warning}\nYou should probably TRAIN this model on a down-stream task to be able"
                " to use it for predictions and inference."
3153
            )
3154

Sylvain Gugger's avatar
Sylvain Gugger committed
3155
        return model, missing_keys, unexpected_keys, mismatched_keys, offload_index, error_msgs
3156
3157

    def retrieve_modules_from_names(self, names, add_prefix=False, remove_prefix=False):
3158
        module_keys = {".".join(key.split(".")[:-1]) for key in names}
3159

Patrick von Platen's avatar
Patrick von Platen committed
3160
3161
        # torch.nn.ParameterList is a special case where two parameter keywords
        # are appended to the module name, *e.g.* bert.special_embeddings.0
3162
        module_keys = module_keys.union(
3163
            {".".join(key.split(".")[:-2]) for key in names if len(key) > 0 and key[-1].isdigit()}
3164
        )
Patrick von Platen's avatar
Patrick von Platen committed
3165

3166
3167
3168
3169
        retrieved_modules = []
        # retrieve all modules that has at least one missing weight name
        for name, module in self.named_modules():
            if remove_prefix:
3170
3171
                _prefix = f"{self.base_model_prefix}."
                name = name[len(_prefix) :] if name.startswith(_prefix) else name
3172
            elif add_prefix:
Patrick von Platen's avatar
Patrick von Platen committed
3173
                name = ".".join([self.base_model_prefix, name]) if len(name) > 0 else self.base_model_prefix
3174
3175
3176
3177
3178
3179

            if name in module_keys:
                retrieved_modules.append(module)

        return retrieved_modules

3180
    @staticmethod
3181
    def _load_pretrained_model_low_mem(model, loaded_state_dict_keys, resolved_archive_file, start_prefix=""):
3182
3183
3184
        """
        This is an experimental function that loads the model using ~1.x model size CPU memory

3185
        Before you call it do:
3186

3187
        1. save which state_dict keys are available
3188
3189
3190
3191
3192
3193
3194
3195
3196
3197
3198
        2. drop state_dict before model is created, since the latter takes 1x model size memory

        Here then we continue:

        3. switch to the meta device all params/buffers that are going to be replaced from the loaded state_dict
        4. load state_dict 2nd time
        5. replace the params/buffers from the state_dict

        Currently, it doesn't handle missing_keys, unexpected_keys, mismatched_keys. It can't handle deepspeed.
        """

3199
3200
3201
3202
        _move_model_to_meta(model, loaded_state_dict_keys, start_prefix)
        state_dict = load_state_dict(resolved_archive_file)
        error_msgs = _load_state_dict_into_meta_model(model, state_dict, loaded_state_dict_keys, start_prefix)
        return error_msgs
3203

3204
3205
3206
3207
3208
3209
    @classmethod
    def register_for_auto_class(cls, auto_class="AutoModel"):
        """
        Register this class with a given auto class. This should only be used for custom models as the ones in the
        library are already mapped with an auto class.

3210
3211
3212
3213
3214
3215
        <Tip warning={true}>

        This API is experimental and may have some slight breaking changes in the next releases.

        </Tip>

3216
3217
3218
3219
3220
3221
3222
3223
3224
3225
3226
3227
3228
3229
        Args:
            auto_class (`str` or `type`, *optional*, defaults to `"AutoModel"`):
                The auto class to register this new model with.
        """
        if not isinstance(auto_class, str):
            auto_class = auto_class.__name__

        import transformers.models.auto as auto_module

        if not hasattr(auto_module, auto_class):
            raise ValueError(f"{auto_class} is not a valid auto class.")

        cls._auto_class = auto_class

thomwolf's avatar
thomwolf committed
3230

3231
PreTrainedModel.push_to_hub = copy_func(PreTrainedModel.push_to_hub)
3232
3233
3234
3235
if PreTrainedModel.push_to_hub.__doc__ is not None:
    PreTrainedModel.push_to_hub.__doc__ = PreTrainedModel.push_to_hub.__doc__.format(
        object="model", object_class="AutoModel", object_files="model file"
    )
3236
3237


thomwolf's avatar
thomwolf committed
3238
class PoolerStartLogits(nn.Module):
Sylvain Gugger's avatar
Sylvain Gugger committed
3239
3240
    """
    Compute SQuAD start logits from sequence hidden states.
3241

Sylvain Gugger's avatar
Sylvain Gugger committed
3242
    Args:
3243
3244
        config ([`PretrainedConfig`]):
            The config used by the model, will be used to grab the `hidden_size` of the model.
Sylvain Gugger's avatar
Sylvain Gugger committed
3245
3246
3247
    """

    def __init__(self, config: PretrainedConfig):
Julien Chaumond's avatar
Julien Chaumond committed
3248
        super().__init__()
thomwolf's avatar
thomwolf committed
3249
3250
        self.dense = nn.Linear(config.hidden_size, 1)

Sylvain Gugger's avatar
Sylvain Gugger committed
3251
3252
3253
3254
3255
    def forward(
        self, hidden_states: torch.FloatTensor, p_mask: Optional[torch.FloatTensor] = None
    ) -> torch.FloatTensor:
        """
        Args:
3256
            hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):
Sylvain Gugger's avatar
Sylvain Gugger committed
3257
                The final hidden states of the model.
3258
            p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):
Sylvain Gugger's avatar
Sylvain Gugger committed
3259
3260
                Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token
                should be masked.
Sylvain Gugger's avatar
Sylvain Gugger committed
3261
3262

        Returns:
3263
            `torch.FloatTensor`: The start logits for SQuAD.
thomwolf's avatar
thomwolf committed
3264
        """
thomwolf's avatar
thomwolf committed
3265
3266
3267
        x = self.dense(hidden_states).squeeze(-1)

        if p_mask is not None:
Lysandre Debut's avatar
Lysandre Debut committed
3268
            if get_parameter_dtype(self) == torch.float16:
3269
3270
3271
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask
thomwolf's avatar
thomwolf committed
3272
3273
3274
3275
3276
3277

        return x


class PoolerEndLogits(nn.Module):
    """
Sylvain Gugger's avatar
Sylvain Gugger committed
3278
    Compute SQuAD end logits from sequence hidden states.
3279

Sylvain Gugger's avatar
Sylvain Gugger committed
3280
    Args:
3281
        config ([`PretrainedConfig`]):
Sylvain Gugger's avatar
Sylvain Gugger committed
3282
3283
            The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps`
            to use.
Sylvain Gugger's avatar
Sylvain Gugger committed
3284
3285
3286
    """

    def __init__(self, config: PretrainedConfig):
Julien Chaumond's avatar
Julien Chaumond committed
3287
        super().__init__()
thomwolf's avatar
thomwolf committed
3288
3289
3290
3291
3292
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dense_1 = nn.Linear(config.hidden_size, 1)

Sylvain Gugger's avatar
Sylvain Gugger committed
3293
3294
3295
3296
3297
3298
3299
3300
3301
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_states: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        p_mask: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        """
        Args:
3302
            hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):
Sylvain Gugger's avatar
Sylvain Gugger committed
3303
                The final hidden states of the model.
3304
            start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*):
Sylvain Gugger's avatar
Sylvain Gugger committed
3305
                The hidden states of the first tokens for the labeled span.
3306
            start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Sylvain Gugger's avatar
Sylvain Gugger committed
3307
                The position of the first token for the labeled span.
3308
            p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):
Sylvain Gugger's avatar
Sylvain Gugger committed
3309
3310
                Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token
                should be masked.
Sylvain Gugger's avatar
Sylvain Gugger committed
3311

3312
        <Tip>
Sylvain Gugger's avatar
Sylvain Gugger committed
3313

Stas Bekman's avatar
Stas Bekman committed
3314
3315
        One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides
        `start_states`.
3316
3317

        </Tip>
Sylvain Gugger's avatar
Sylvain Gugger committed
3318
3319

        Returns:
3320
            `torch.FloatTensor`: The end logits for SQuAD.
thomwolf's avatar
thomwolf committed
3321
        """
3322
3323
3324
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
thomwolf's avatar
thomwolf committed
3325
        if start_positions is not None:
3326
            slen, hsz = hidden_states.shape[-2:]
3327
3328
3329
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions)  # shape (bsz, 1, hsz)
            start_states = start_states.expand(-1, slen, -1)  # shape (bsz, slen, hsz)
thomwolf's avatar
thomwolf committed
3330
3331
3332
3333
3334
3335
3336

        x = self.dense_0(torch.cat([hidden_states, start_states], dim=-1))
        x = self.activation(x)
        x = self.LayerNorm(x)
        x = self.dense_1(x).squeeze(-1)

        if p_mask is not None:
Lysandre Debut's avatar
Lysandre Debut committed
3337
            if get_parameter_dtype(self) == torch.float16:
3338
3339
3340
                x = x * (1 - p_mask) - 65500 * p_mask
            else:
                x = x * (1 - p_mask) - 1e30 * p_mask
thomwolf's avatar
thomwolf committed
3341
3342
3343
3344
3345

        return x


class PoolerAnswerClass(nn.Module):
Sylvain Gugger's avatar
Sylvain Gugger committed
3346
3347
3348
3349
    """
    Compute SQuAD 2.0 answer class from classification and start tokens hidden states.

    Args:
3350
3351
        config ([`PretrainedConfig`]):
            The config used by the model, will be used to grab the `hidden_size` of the model.
Sylvain Gugger's avatar
Sylvain Gugger committed
3352
    """
3353

thomwolf's avatar
thomwolf committed
3354
    def __init__(self, config):
Julien Chaumond's avatar
Julien Chaumond committed
3355
        super().__init__()
thomwolf's avatar
thomwolf committed
3356
3357
3358
3359
        self.dense_0 = nn.Linear(config.hidden_size * 2, config.hidden_size)
        self.activation = nn.Tanh()
        self.dense_1 = nn.Linear(config.hidden_size, 1, bias=False)

Sylvain Gugger's avatar
Sylvain Gugger committed
3360
3361
3362
3363
3364
3365
3366
    def forward(
        self,
        hidden_states: torch.FloatTensor,
        start_states: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        cls_index: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
3367
3368
        """
        Args:
3369
            hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):
Sylvain Gugger's avatar
Sylvain Gugger committed
3370
                The final hidden states of the model.
3371
            start_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`, *optional*):
Sylvain Gugger's avatar
Sylvain Gugger committed
3372
                The hidden states of the first tokens for the labeled span.
3373
            start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Sylvain Gugger's avatar
Sylvain Gugger committed
3374
                The position of the first token for the labeled span.
3375
3376
3377
3378
            cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
                Position of the CLS token for each sentence in the batch. If `None`, takes the last token.

        <Tip>
Sylvain Gugger's avatar
Sylvain Gugger committed
3379

Stas Bekman's avatar
Stas Bekman committed
3380
3381
        One of `start_states` or `start_positions` should be not `None`. If both are set, `start_positions` overrides
        `start_states`.
Sylvain Gugger's avatar
Sylvain Gugger committed
3382

3383
        </Tip>
Sylvain Gugger's avatar
Sylvain Gugger committed
3384
3385

        Returns:
3386
            `torch.FloatTensor`: The SQuAD 2.0 answer class.
thomwolf's avatar
thomwolf committed
3387
        """
Sylvain Gugger's avatar
Sylvain Gugger committed
3388
        # No dependency on end_feature so that we can obtain one single `cls_logits` for each sample.
3389
        hsz = hidden_states.shape[-1]
3390
3391
3392
        assert (
            start_states is not None or start_positions is not None
        ), "One of start_states, start_positions should be not None"
thomwolf's avatar
thomwolf committed
3393
        if start_positions is not None:
3394
3395
            start_positions = start_positions[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            start_states = hidden_states.gather(-2, start_positions).squeeze(-2)  # shape (bsz, hsz)
thomwolf's avatar
thomwolf committed
3396
3397

        if cls_index is not None:
3398
3399
            cls_index = cls_index[:, None, None].expand(-1, -1, hsz)  # shape (bsz, 1, hsz)
            cls_token_state = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, hsz)
thomwolf's avatar
thomwolf committed
3400
        else:
3401
            cls_token_state = hidden_states[:, -1, :]  # shape (bsz, hsz)
thomwolf's avatar
thomwolf committed
3402
3403
3404
3405
3406
3407
3408
3409

        x = self.dense_0(torch.cat([start_states, cls_token_state], dim=-1))
        x = self.activation(x)
        x = self.dense_1(x).squeeze(-1)

        return x


3410
3411
3412
@dataclass
class SquadHeadOutput(ModelOutput):
    """
3413
    Base class for outputs of question answering models using a [`~modeling_utils.SQuADHead`].
3414
3415

    Args:
3416
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned if both `start_positions` and `end_positions` are provided):
Sylvain Gugger's avatar
Sylvain Gugger committed
3417
3418
            Classification loss as the sum of start token, end token (and is_impossible if provided) classification
            losses.
3419
        start_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
3420
            Log probabilities for the top config.start_n_top start token possibilities (beam-search).
3421
        start_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
3422
            Indices for the top config.start_n_top start token possibilities (beam-search).
3423
3424
        end_top_log_probs (`torch.FloatTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Log probabilities for the top `config.start_n_top * config.end_n_top` end token possibilities
Sylvain Gugger's avatar
Sylvain Gugger committed
3425
            (beam-search).
3426
3427
3428
3429
        end_top_index (`torch.LongTensor` of shape `(batch_size, config.start_n_top * config.end_n_top)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Indices for the top `config.start_n_top * config.end_n_top` end token possibilities (beam-search).
        cls_logits (`torch.FloatTensor` of shape `(batch_size,)`, *optional*, returned if `start_positions` or `end_positions` is not provided):
            Log probabilities for the `is_impossible` label of the answers.
3430
3431
3432
3433
3434
3435
3436
3437
3438
3439
3440

    """

    loss: Optional[torch.FloatTensor] = None
    start_top_log_probs: Optional[torch.FloatTensor] = None
    start_top_index: Optional[torch.LongTensor] = None
    end_top_log_probs: Optional[torch.FloatTensor] = None
    end_top_index: Optional[torch.LongTensor] = None
    cls_logits: Optional[torch.FloatTensor] = None


thomwolf's avatar
thomwolf committed
3441
class SQuADHead(nn.Module):
Sylvain Gugger's avatar
Sylvain Gugger committed
3442
3443
    r"""
    A SQuAD head inspired by XLNet.
3444

Sylvain Gugger's avatar
Sylvain Gugger committed
3445
    Args:
3446
        config ([`PretrainedConfig`]):
Sylvain Gugger's avatar
Sylvain Gugger committed
3447
3448
            The config used by the model, will be used to grab the `hidden_size` of the model and the `layer_norm_eps`
            to use.
thomwolf's avatar
thomwolf committed
3449
    """
3450

thomwolf's avatar
thomwolf committed
3451
    def __init__(self, config):
Julien Chaumond's avatar
Julien Chaumond committed
3452
        super().__init__()
thomwolf's avatar
thomwolf committed
3453
3454
3455
3456
3457
3458
3459
        self.start_n_top = config.start_n_top
        self.end_n_top = config.end_n_top

        self.start_logits = PoolerStartLogits(config)
        self.end_logits = PoolerEndLogits(config)
        self.answer_class = PoolerAnswerClass(config)

Sylvain Gugger's avatar
Sylvain Gugger committed
3460
    @replace_return_docstrings(output_type=SquadHeadOutput, config_class=PretrainedConfig)
3461
    def forward(
3462
        self,
Sylvain Gugger's avatar
Sylvain Gugger committed
3463
3464
3465
3466
3467
3468
        hidden_states: torch.FloatTensor,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        cls_index: Optional[torch.LongTensor] = None,
        is_impossible: Optional[torch.LongTensor] = None,
        p_mask: Optional[torch.FloatTensor] = None,
3469
        return_dict: bool = False,
Sylvain Gugger's avatar
Sylvain Gugger committed
3470
3471
    ) -> Union[SquadHeadOutput, Tuple[torch.FloatTensor]]:
        """
Lysandre's avatar
Lysandre committed
3472
        Args:
3473
            hidden_states (`torch.FloatTensor` of shape `(batch_size, seq_len, hidden_size)`):
Lysandre's avatar
Lysandre committed
3474
                Final hidden states of the model on the sequence tokens.
3475
            start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Lysandre's avatar
Lysandre committed
3476
                Positions of the first token for the labeled span.
3477
            end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Lysandre's avatar
Lysandre committed
3478
                Positions of the last token for the labeled span.
3479
3480
3481
            cls_index (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
                Position of the CLS token for each sentence in the batch. If `None`, takes the last token.
            is_impossible (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Lysandre's avatar
Lysandre committed
3482
                Whether the question has a possible answer in the paragraph or not.
3483
            p_mask (`torch.FloatTensor` of shape `(batch_size, seq_len)`, *optional*):
Sylvain Gugger's avatar
Sylvain Gugger committed
3484
3485
                Mask for tokens at invalid position, such as query and special symbols (PAD, SEP, CLS). 1.0 means token
                should be masked.
3486
            return_dict (`bool`, *optional*, defaults to `False`):
3487
                Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
Sylvain Gugger's avatar
Sylvain Gugger committed
3488

Lysandre's avatar
Lysandre committed
3489
        Returns:
Sylvain Gugger's avatar
Sylvain Gugger committed
3490
        """
thomwolf's avatar
thomwolf committed
3491
        start_logits = self.start_logits(hidden_states, p_mask=p_mask)
thomwolf's avatar
thomwolf committed
3492
3493
3494
3495
3496
3497
3498
3499
3500
3501
3502
3503
3504
3505
3506
3507
3508
3509
3510
3511
3512
3513
3514

        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, let's remove the dimension added by batch splitting
            for x in (start_positions, end_positions, cls_index, is_impossible):
                if x is not None and x.dim() > 1:
                    x.squeeze_(-1)

            # during training, compute the end logits based on the ground truth of the start position
            end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask)

            loss_fct = CrossEntropyLoss()
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

            if cls_index is not None and is_impossible is not None:
                # Predict answerability from the representation of CLS and START
                cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index)
                loss_fct_cls = nn.BCEWithLogitsLoss()
                cls_loss = loss_fct_cls(cls_logits, is_impossible)

                # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss
                total_loss += cls_loss * 0.5
3515

3516
            return SquadHeadOutput(loss=total_loss) if return_dict else (total_loss,)
thomwolf's avatar
thomwolf committed
3517
3518
3519
3520

        else:
            # during inference, compute the end logits based on beam search
            bsz, slen, hsz = hidden_states.size()
3521
            start_log_probs = nn.functional.softmax(start_logits, dim=-1)  # shape (bsz, slen)
3522
3523
3524
3525
3526
3527
3528
3529
3530
3531
3532

            start_top_log_probs, start_top_index = torch.topk(
                start_log_probs, self.start_n_top, dim=-1
            )  # shape (bsz, start_n_top)
            start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz)  # shape (bsz, start_n_top, hsz)
            start_states = torch.gather(hidden_states, -2, start_top_index_exp)  # shape (bsz, start_n_top, hsz)
            start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1)  # shape (bsz, slen, start_n_top, hsz)

            hidden_states_expanded = hidden_states.unsqueeze(2).expand_as(
                start_states
            )  # shape (bsz, slen, start_n_top, hsz)
thomwolf's avatar
thomwolf committed
3533
3534
            p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None
            end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask)
3535
            end_log_probs = nn.functional.softmax(end_logits, dim=1)  # shape (bsz, slen, start_n_top)
thomwolf's avatar
thomwolf committed
3536

3537
3538
3539
            end_top_log_probs, end_top_index = torch.topk(
                end_log_probs, self.end_n_top, dim=1
            )  # shape (bsz, end_n_top, start_n_top)
thomwolf's avatar
thomwolf committed
3540
3541
3542
3543
3544
3545
            end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top)
            end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top)

            start_states = torch.einsum("blh,bl->bh", hidden_states, start_log_probs)
            cls_logits = self.answer_class(hidden_states, start_states=start_states, cls_index=cls_index)

3546
            if not return_dict:
3547
3548
3549
3550
3551
3552
3553
3554
3555
                return (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits)
            else:
                return SquadHeadOutput(
                    start_top_log_probs=start_top_log_probs,
                    start_top_index=start_top_index,
                    end_top_log_probs=end_top_log_probs,
                    end_top_index=end_top_index,
                    cls_logits=cls_logits,
                )
thomwolf's avatar
thomwolf committed
3556
3557
3558


class SequenceSummary(nn.Module):
Sylvain Gugger's avatar
Sylvain Gugger committed
3559
3560
3561
3562
    r"""
    Compute a single vector summary of a sequence hidden states.

    Args:
3563
        config ([`PretrainedConfig`]):
Sylvain Gugger's avatar
Sylvain Gugger committed
3564
3565
            The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
            config class of your model for the default values it uses):
Sylvain Gugger's avatar
Sylvain Gugger committed
3566

3567
            - **summary_type** (`str`) -- The method to use to make this summary. Accepted values are:
Sylvain Gugger's avatar
Sylvain Gugger committed
3568

3569
3570
3571
3572
3573
                - `"last"` -- Take the last token hidden state (like XLNet)
                - `"first"` -- Take the first token hidden state (like Bert)
                - `"mean"` -- Take the mean of all tokens hidden states
                - `"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
                - `"attn"` -- Not implemented now, use multi-head attention
Sylvain Gugger's avatar
Sylvain Gugger committed
3574

3575
            - **summary_use_proj** (`bool`) -- Add a projection after the vector extraction.
Sylvain Gugger's avatar
Sylvain Gugger committed
3576
3577
3578
3579
3580
3581
            - **summary_proj_to_labels** (`bool`) -- If `True`, the projection outputs to `config.num_labels` classes
              (otherwise to `config.hidden_size`).
            - **summary_activation** (`Optional[str]`) -- Set to `"tanh"` to add a tanh activation to the output,
              another string or `None` will add no activation.
            - **summary_first_dropout** (`float`) -- Optional dropout probability before the projection and activation.
            - **summary_last_dropout** (`float`)-- Optional dropout probability after the projection and activation.
thomwolf's avatar
thomwolf committed
3582
    """
3583

3584
    def __init__(self, config: PretrainedConfig):
Julien Chaumond's avatar
Julien Chaumond committed
3585
        super().__init__()
thomwolf's avatar
thomwolf committed
3586

3587
        self.summary_type = getattr(config, "summary_type", "last")
3588
        if self.summary_type == "attn":
thomwolf's avatar
thomwolf committed
3589
3590
3591
3592
3593
            # We should use a standard multi-head attention module with absolute positional embedding for that.
            # Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
            # We can probably just use the multi-head attention module of PyTorch >=1.1.0
            raise NotImplementedError

thomwolf's avatar
thomwolf committed
3594
        self.summary = Identity()
3595
3596
        if hasattr(config, "summary_use_proj") and config.summary_use_proj:
            if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
3597
                num_classes = config.num_labels
thomwolf's avatar
thomwolf committed
3598
3599
3600
3601
            else:
                num_classes = config.hidden_size
            self.summary = nn.Linear(config.hidden_size, num_classes)

3602
        activation_string = getattr(config, "summary_activation", None)
Lysandre's avatar
Lysandre committed
3603
        self.activation: Callable = get_activation(activation_string) if activation_string else Identity()
thomwolf's avatar
thomwolf committed
3604

thomwolf's avatar
thomwolf committed
3605
        self.first_dropout = Identity()
3606
        if hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0:
3607
3608
            self.first_dropout = nn.Dropout(config.summary_first_dropout)

thomwolf's avatar
thomwolf committed
3609
        self.last_dropout = Identity()
3610
        if hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0:
3611
            self.last_dropout = nn.Dropout(config.summary_last_dropout)
thomwolf's avatar
thomwolf committed
3612

Sylvain Gugger's avatar
Sylvain Gugger committed
3613
3614
3615
3616
3617
3618
3619
    def forward(
        self, hidden_states: torch.FloatTensor, cls_index: Optional[torch.LongTensor] = None
    ) -> torch.FloatTensor:
        """
        Compute a single vector summary of a sequence hidden states.

        Args:
3620
            hidden_states (`torch.FloatTensor` of shape `[batch_size, seq_len, hidden_size]`):
Sylvain Gugger's avatar
Sylvain Gugger committed
3621
                The hidden states of the last layer.
3622
            cls_index (`torch.LongTensor` of shape `[batch_size]` or `[batch_size, ...]` where ... are optional leading dimensions of `hidden_states`, *optional*):
Sylvain Gugger's avatar
Sylvain Gugger committed
3623
                Used if `summary_type == "cls_index"` and takes the last token of the sequence as classification token.
Sylvain Gugger's avatar
Sylvain Gugger committed
3624
3625

        Returns:
3626
            `torch.FloatTensor`: The summary of the sequence hidden states.
thomwolf's avatar
thomwolf committed
3627
        """
3628
        if self.summary_type == "last":
thomwolf's avatar
thomwolf committed
3629
            output = hidden_states[:, -1]
3630
        elif self.summary_type == "first":
thomwolf's avatar
thomwolf committed
3631
            output = hidden_states[:, 0]
3632
        elif self.summary_type == "mean":
thomwolf's avatar
thomwolf committed
3633
            output = hidden_states.mean(dim=1)
3634
        elif self.summary_type == "cls_index":
thomwolf's avatar
thomwolf committed
3635
            if cls_index is None:
Lysandre's avatar
Lysandre committed
3636
3637
3638
3639
3640
                cls_index = torch.full_like(
                    hidden_states[..., :1, :],
                    hidden_states.shape[-2] - 1,
                    dtype=torch.long,
                )
thomwolf's avatar
thomwolf committed
3641
            else:
thomwolf's avatar
thomwolf committed
3642
                cls_index = cls_index.unsqueeze(-1).unsqueeze(-1)
3643
                cls_index = cls_index.expand((-1,) * (cls_index.dim() - 1) + (hidden_states.size(-1),))
thomwolf's avatar
thomwolf committed
3644
            # shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
3645
3646
            output = hidden_states.gather(-2, cls_index).squeeze(-2)  # shape (bsz, XX, hidden_size)
        elif self.summary_type == "attn":
thomwolf's avatar
thomwolf committed
3647
3648
            raise NotImplementedError

3649
        output = self.first_dropout(output)
thomwolf's avatar
thomwolf committed
3650
3651
        output = self.summary(output)
        output = self.activation(output)
3652
        output = self.last_dropout(output)
thomwolf's avatar
thomwolf committed
3653
3654
3655
3656

        return output


3657
def unwrap_model(model: nn.Module) -> nn.Module:
3658
3659
3660
3661
    """
    Recursively unwraps a model from potential containers (as used in distributed training).

    Args:
3662
        model (`torch.nn.Module`): The model to unwrap.
3663
3664
3665
3666
3667
3668
    """
    # since there could be multiple levels of wrapping, unwrap recursively
    if hasattr(model, "module"):
        return unwrap_model(model.module)
    else:
        return model
Sylvain Gugger's avatar
Sylvain Gugger committed
3669
3670
3671
3672
3673
3674
3675
3676
3677
3678
3679
3680
3681
3682
3683
3684
3685
3686
3687
3688
3689
3690
3691


def expand_device_map(device_map, param_names):
    """
    Expand a device map to return the correspondance parameter name to device.
    """
    new_device_map = {}
    for module, device in device_map.items():
        new_device_map.update({p: device for p in param_names if p == module or p.startswith(f"{module}.")})
    return new_device_map


def get_disk_only_shard_files(device_map, sharded_metadata):
    """
    Returns the list of shard files containing only weights offloaded to disk.
    """
    files_content = collections.defaultdict(list)
    for weight_name, filename in sharded_metadata["weight_map"].items():
        while len(weight_name) > 0 and weight_name not in device_map:
            weight_name = ".".join(weight_name.split(".")[:-1])
        files_content[filename].append(device_map[weight_name])

    return [fname for fname, devices in files_content.items() if set(devices) == {"disk"}]