# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
# Copyright (c) 2022, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import fnmatch
import importlib
import inspect
import os
import re
import sys
import warnings
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Union

import numpy as np
import PIL
import torch
from huggingface_hub import hf_hub_download, model_info, snapshot_download
from packaging import version
from requests.exceptions import HTTPError
from tqdm.auto import tqdm

import diffusers

from .. import __version__
from ..configuration_utils import ConfigMixin
from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT
from ..schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
from ..utils import (
    CONFIG_NAME,
    DEPRECATED_REVISION_ARGS,
    DIFFUSERS_CACHE,
    HF_HUB_OFFLINE,
    SAFETENSORS_WEIGHTS_NAME,
    WEIGHTS_NAME,
    BaseOutput,
    deprecate,
    get_class_from_dynamic_module,
    is_accelerate_available,
    is_accelerate_version,
    is_compiled_module,
    is_safetensors_available,
    is_torch_version,
    is_transformers_available,
    logging,
    numpy_to_pil,
)


if is_transformers_available():
    import transformers
    from transformers import PreTrainedModel
    from transformers.utils import FLAX_WEIGHTS_NAME as TRANSFORMERS_FLAX_WEIGHTS_NAME
    from transformers.utils import SAFE_WEIGHTS_NAME as TRANSFORMERS_SAFE_WEIGHTS_NAME
    from transformers.utils import WEIGHTS_NAME as TRANSFORMERS_WEIGHTS_NAME

from ..utils import FLAX_WEIGHTS_NAME, ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME


if is_accelerate_available():
    import accelerate


INDEX_FILE = "diffusion_pytorch_model.bin"
CUSTOM_PIPELINE_FILE_NAME = "pipeline.py"
DUMMY_MODULES_FOLDER = "diffusers.utils"
TRANSFORMERS_DUMMY_MODULES_FOLDER = "transformers.utils"


logger = logging.get_logger(__name__)


LOADABLE_CLASSES = {
    "diffusers": {
        "ModelMixin": ["save_pretrained", "from_pretrained"],
        "SchedulerMixin": ["save_pretrained", "from_pretrained"],
        "DiffusionPipeline": ["save_pretrained", "from_pretrained"],
        "OnnxRuntimeModel": ["save_pretrained", "from_pretrained"],
    },
    "transformers": {
        "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"],
        "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"],
        "PreTrainedModel": ["save_pretrained", "from_pretrained"],
        "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"],
        "ProcessorMixin": ["save_pretrained", "from_pretrained"],
        "ImageProcessingMixin": ["save_pretrained", "from_pretrained"],
    },
    "onnxruntime.training": {
        "ORTModule": ["save_pretrained", "from_pretrained"],
    },
}

ALL_IMPORTABLE_CLASSES = {}
for library in LOADABLE_CLASSES:
    ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library])


@dataclass
class ImagePipelineOutput(BaseOutput):
    """
    Output class for image pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,
            num_channels)`.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]
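
# Illustrative usage (sketch, not part of the library's documented API): `BaseOutput`
# subclasses support both attribute and tuple-style access, so for
# `out = ImagePipelineOutput(images=[image])`, `out.images[0]` and `out[0][0]`
# refer to the same PIL image.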


@dataclass
class AudioPipelineOutput(BaseOutput):
    """
    Output class for audio pipelines.

    Args:
        audios (`np.ndarray`)
            List of denoised audio samples as a NumPy array of shape `(batch_size, num_channels, sample_rate)`.
    """

    audios: np.ndarray


def is_safetensors_compatible(filenames, variant=None, passed_components=None) -> bool:
    """
    Checking for safetensors compatibility:
    - By default, all models are saved with the default pytorch serialization, so we use the list of default pytorch
      files to know which safetensors files are needed.
    - The model is safetensors compatible only if there is a matching safetensors file for every default pytorch file.

    Converting default pytorch serialized filenames to safetensors serialized filenames:
    - For models from the diffusers library, just replace the ".bin" extension with ".safetensors"
    - For models from the transformers library, the filename changes from "pytorch_model" to "model", and the ".bin"
      extension is replaced with ".safetensors"
    """
    pt_filenames = []

    sf_filenames = set()

    passed_components = passed_components or []

    for filename in filenames:
        _, extension = os.path.splitext(filename)

        if len(filename.split("/")) == 2 and filename.split("/")[0] in passed_components:
            continue

        if extension == ".bin":
            pt_filenames.append(filename)
        elif extension == ".safetensors":
            sf_filenames.add(filename)

    for filename in pt_filenames:
        #  filename = 'foo/bar/baz.bam' -> path = 'foo/bar', filename = 'baz', extension = '.bam'
        path, filename = os.path.split(filename)
        filename, extension = os.path.splitext(filename)

        if filename.startswith("pytorch_model"):
            filename = filename.replace("pytorch_model", "model")

        expected_sf_filename = os.path.join(path, filename)
        expected_sf_filename = f"{expected_sf_filename}.safetensors"

        if expected_sf_filename not in sf_filenames:
            logger.warning(f"{expected_sf_filename} not found")
            return False

    return True
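
# Illustrative example (hypothetical filenames): for a repo listing such as
#     filenames = [
#         "unet/diffusion_pytorch_model.bin",
#         "unet/diffusion_pytorch_model.safetensors",
#         "text_encoder/pytorch_model.bin",
#         "text_encoder/model.safetensors",
#     ]
# is_safetensors_compatible(filenames) returns True: every ".bin" file has a
# matching ".safetensors" file under the renaming rules described above.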


def variant_compatible_siblings(filenames, variant=None) -> Union[List[os.PathLike], str]:
    weight_names = [
        WEIGHTS_NAME,
        SAFETENSORS_WEIGHTS_NAME,
        FLAX_WEIGHTS_NAME,
        ONNX_WEIGHTS_NAME,
        ONNX_EXTERNAL_WEIGHTS_NAME,
    ]

    if is_transformers_available():
        weight_names += [TRANSFORMERS_WEIGHTS_NAME, TRANSFORMERS_SAFE_WEIGHTS_NAME, TRANSFORMERS_FLAX_WEIGHTS_NAME]

    # model_pytorch, diffusion_model_pytorch, ...
    weight_prefixes = [w.split(".")[0] for w in weight_names]
    # .bin, .safetensors, ...
    weight_suffixs = [w.split(".")[-1] for w in weight_names]
    # -00001-of-00002
    transformers_index_format = r"\d{5}-of-\d{5}"

    if variant is not None:
        # `diffusion_pytorch_model.fp16.bin` as well as `model.fp16-00001-of-00002.safetensors`
        variant_file_re = re.compile(
            rf"({'|'.join(weight_prefixes)})\.({variant}|{variant}-{transformers_index_format})\.({'|'.join(weight_suffixs)})$"
        )
        # `text_encoder/pytorch_model.bin.index.fp16.json`
        variant_index_re = re.compile(
            rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.{variant}\.json$"
        )

    # `diffusion_pytorch_model.bin` as well as `model-00001-of-00002.safetensors`
    non_variant_file_re = re.compile(
        rf"({'|'.join(weight_prefixes)})(-{transformers_index_format})?\.({'|'.join(weight_suffixs)})$"
    )
    # `text_encoder/pytorch_model.bin.index.json`
    non_variant_index_re = re.compile(rf"({'|'.join(weight_prefixes)})\.({'|'.join(weight_suffixs)})\.index\.json")

    if variant is not None:
        variant_weights = {f for f in filenames if variant_file_re.match(f.split("/")[-1]) is not None}
        variant_indexes = {f for f in filenames if variant_index_re.match(f.split("/")[-1]) is not None}
        variant_filenames = variant_weights | variant_indexes
    else:
        variant_filenames = set()

    non_variant_weights = {f for f in filenames if non_variant_file_re.match(f.split("/")[-1]) is not None}
    non_variant_indexes = {f for f in filenames if non_variant_index_re.match(f.split("/")[-1]) is not None}
    non_variant_filenames = non_variant_weights | non_variant_indexes

    # all variant filenames will be used by default
    usable_filenames = set(variant_filenames)

    def convert_to_variant(filename):
        if "index" in filename:
            variant_filename = filename.replace("index", f"index.{variant}")
        elif re.compile(f"^(.*?){transformers_index_format}").match(filename) is not None:
            variant_filename = f"{filename.split('-')[0]}.{variant}-{'-'.join(filename.split('-')[1:])}"
        else:
            variant_filename = f"{filename.split('.')[0]}.{variant}.{filename.split('.')[1]}"
        return variant_filename

    for f in non_variant_filenames:
        variant_filename = convert_to_variant(f)
        if variant_filename not in usable_filenames:
            usable_filenames.add(f)

    return usable_filenames, variant_filenames
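
# Illustrative example (hypothetical filenames): with
#     filenames = {"unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.fp16.bin"}
# variant_compatible_siblings(filenames, variant="fp16") returns the fp16 file in
# both sets; the non-variant ".bin" file is skipped because its variant-converted
# equivalent is already in `usable_filenames`.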


def warn_deprecated_model_variant(pretrained_model_name_or_path, use_auth_token, variant, revision, model_filenames):
    info = model_info(
        pretrained_model_name_or_path,
        use_auth_token=use_auth_token,
        revision=None,
    )
    filenames = {sibling.rfilename for sibling in info.siblings}
    comp_model_filenames, _ = variant_compatible_siblings(filenames, variant=revision)
    comp_model_filenames = [".".join(f.split(".")[:1] + f.split(".")[2:]) for f in comp_model_filenames]

    if set(comp_model_filenames) == set(model_filenames):
        warnings.warn(
            f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` even though you can load it via `variant=`{revision}`. Loading model variants via `revision='{revision}'` is deprecated and will be removed in diffusers v1. Please use `variant='{revision}'` instead.",
            FutureWarning,
        )
    else:
        warnings.warn(
            f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have the required variant filenames in the 'main' branch. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {revision} files' so that the correct variant file can be added.",
            FutureWarning,
        )


def maybe_raise_or_warn(
    library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module
):
    """Simple helper method to raise or warn in case incorrect module has been passed"""
    if not is_pipeline_module:
        library = importlib.import_module(library_name)
        class_obj = getattr(library, class_name)
        class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}

        expected_class_obj = None
        for class_name, class_candidate in class_candidates.items():
            if class_candidate is not None and issubclass(class_obj, class_candidate):
                expected_class_obj = class_candidate

        # Dynamo wraps the original model in a private class.
        # I didn't find a public API to get the original class.
        sub_model = passed_class_obj[name]
        model_cls = sub_model.__class__
        if is_compiled_module(sub_model):
            model_cls = sub_model._orig_mod.__class__

        if not issubclass(model_cls, expected_class_obj):
            raise ValueError(
                f"{passed_class_obj[name]} is of type: {model_cls}, but should be" f" {expected_class_obj}"
            )
    else:
        logger.warning(
            f"You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it"
            " has the correct type"
        )


def get_class_obj_and_candidates(library_name, class_name, importable_classes, pipelines, is_pipeline_module):
    """Simple helper method to retrieve class object of module as well as potential parent class objects"""
    if is_pipeline_module:
        pipeline_module = getattr(pipelines, library_name)

        class_obj = getattr(pipeline_module, class_name)
        class_candidates = {c: class_obj for c in importable_classes.keys()}
    else:
        # else we just import it from the library.
        library = importlib.import_module(library_name)

        class_obj = getattr(library, class_name)
        class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}

    return class_obj, class_candidates


def _get_pipeline_class(class_obj, config, custom_pipeline=None, cache_dir=None, revision=None):
    if custom_pipeline is not None:
        if custom_pipeline.endswith(".py"):
            path = Path(custom_pipeline)
            # decompose into folder & file
            file_name = path.name
            custom_pipeline = path.parent.absolute()
        else:
            file_name = CUSTOM_PIPELINE_FILE_NAME

        return get_class_from_dynamic_module(
            custom_pipeline, module_file=file_name, cache_dir=cache_dir, revision=revision
        )

    if class_obj != DiffusionPipeline:
        return class_obj

    diffusers_module = importlib.import_module(class_obj.__module__.split(".")[0])
    return getattr(diffusers_module, config["_class_name"])
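
# Illustrative behavior: for a repo whose model_index.json contains
# `"_class_name": "StableDiffusionPipeline"`, calling this from
# `DiffusionPipeline.from_pretrained` resolves to diffusers.StableDiffusionPipeline.
# Passing custom_pipeline="./my_pipeline.py" (hypothetical path) instead loads the
# class defined in that file via get_class_from_dynamic_module.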


def load_sub_model(
    library_name: str,
    class_name: str,
    importable_classes: List[Any],
    pipelines: Any,
    is_pipeline_module: bool,
    pipeline_class: Any,
    torch_dtype: torch.dtype,
    provider: Any,
    sess_options: Any,
    device_map: Optional[Union[Dict[str, torch.device], str]],
    max_memory: Optional[Dict[Union[int, str], Union[int, str]]],
    offload_folder: Optional[Union[str, os.PathLike]],
    offload_state_dict: bool,
    model_variants: Dict[str, str],
    name: str,
    from_flax: bool,
    variant: str,
    low_cpu_mem_usage: bool,
    cached_folder: Union[str, os.PathLike],
):
    """Helper method to load the module `name` from `library_name` and `class_name`"""
    # retrieve class candidates
    class_obj, class_candidates = get_class_obj_and_candidates(
        library_name, class_name, importable_classes, pipelines, is_pipeline_module
    )

    load_method_name = None
    # retrieve load method name
    for class_name, class_candidate in class_candidates.items():
        if class_candidate is not None and issubclass(class_obj, class_candidate):
            load_method_name = importable_classes[class_name][1]

    # if load method name is None, then we have a dummy module -> raise Error
    if load_method_name is None:
        none_module = class_obj.__module__
        is_dummy_path = none_module.startswith(DUMMY_MODULES_FOLDER) or none_module.startswith(
            TRANSFORMERS_DUMMY_MODULES_FOLDER
        )
        if is_dummy_path and "dummy" in none_module:
            # call class_obj for nice error message of missing requirements
            class_obj()

        raise ValueError(
            f"The component {class_obj} of {pipeline_class} cannot be loaded as it does not seem to have"
            f" any of the loading methods defined in {ALL_IMPORTABLE_CLASSES}."
        )

    load_method = getattr(class_obj, load_method_name)

    # add kwargs to loading method
    loading_kwargs = {}
    if issubclass(class_obj, torch.nn.Module):
        loading_kwargs["torch_dtype"] = torch_dtype
    if issubclass(class_obj, diffusers.OnnxRuntimeModel):
        loading_kwargs["provider"] = provider
        loading_kwargs["sess_options"] = sess_options

    is_diffusers_model = issubclass(class_obj, diffusers.ModelMixin)

    if is_transformers_available():
        transformers_version = version.parse(version.parse(transformers.__version__).base_version)
    else:
        transformers_version = "N/A"

    is_transformers_model = (
        is_transformers_available()
        and issubclass(class_obj, PreTrainedModel)
        and transformers_version >= version.parse("4.20.0")
    )

    # When loading a transformers model with `device_map=None`, the weights are fully initialized before the
    # checkpoint is loaded, unlike in diffusers. To make default loading faster we set the
    # `low_cpu_mem_usage=low_cpu_mem_usage` flag, which is `True` by default and skips that initialization.
    if is_diffusers_model or is_transformers_model:
        loading_kwargs["device_map"] = device_map
        loading_kwargs["max_memory"] = max_memory
        loading_kwargs["offload_folder"] = offload_folder
        loading_kwargs["offload_state_dict"] = offload_state_dict
        loading_kwargs["variant"] = model_variants.pop(name, None)
        if from_flax:
            loading_kwargs["from_flax"] = True

        # the following can be deleted once the minimum required `transformers` version
        # is higher than 4.27
        if (
            is_transformers_model
            and loading_kwargs["variant"] is not None
            and transformers_version < version.parse("4.27.0")
        ):
            raise ImportError(
                f"When passing `variant='{variant}'`, please make sure to upgrade your `transformers` version to at least 4.27.0.dev0"
            )
        elif is_transformers_model and loading_kwargs["variant"] is None:
            loading_kwargs.pop("variant")

        # if `from_flax` and model is transformer model, can currently not load with `low_cpu_mem_usage`
        if not (from_flax and is_transformers_model):
            loading_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage
        else:
            loading_kwargs["low_cpu_mem_usage"] = False

    # check if the module is in a subdirectory
    if os.path.isdir(os.path.join(cached_folder, name)):
        loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
    else:
        # else load from the root directory
        loaded_sub_model = load_method(cached_folder, **loading_kwargs)

    return loaded_sub_model
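
# Illustrative layout (hypothetical cache): for name="unet" and a cached folder
#     cached_folder/
#         model_index.json
#         unet/
#             config.json
#             diffusion_pytorch_model.bin
# the subdirectory branch above resolves to
# load_method(os.path.join(cached_folder, "unet"), **loading_kwargs).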


class DiffusionPipeline(ConfigMixin):
    r"""
    Base class for all pipelines.

    [`DiffusionPipeline`] stores all components (models, schedulers, and processors) for diffusion pipelines and
    provides methods for loading, downloading and saving models. It also includes methods to:

        - move all PyTorch modules to the device of your choice
        - enable/disable the progress bar for the denoising iteration

    Class attributes:

        - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the
          diffusion pipeline's components.
        - **_optional_components** (List[`str`]) -- List of all optional components that don't have to be passed to the
          pipeline to function (should be overridden by subclasses).
    """
    config_name = "model_index.json"
    _optional_components = []
    _exclude_from_cpu_offload = []

    def register_modules(self, **kwargs):
        # import it here to avoid circular import
        from diffusers import pipelines

        for name, module in kwargs.items():
            # retrieve library
            if module is None:
                register_dict = {name: (None, None)}
            else:
                # register the config from the original module, not the dynamo compiled one
                if is_compiled_module(module):
                    not_compiled_module = module._orig_mod
                else:
                    not_compiled_module = module

                library = not_compiled_module.__module__.split(".")[0]

                # check if the module is a pipeline module
                module_path_items = not_compiled_module.__module__.split(".")
                pipeline_dir = module_path_items[-2] if len(module_path_items) > 2 else None

                is_pipeline_module = pipeline_dir in module_path_items and hasattr(pipelines, pipeline_dir)

                # if library is not in LOADABLE_CLASSES, then it is a custom module.
                # Or if it's a pipeline module, then the module is inside the pipeline
                # folder so we set the library to module name.
                if is_pipeline_module:
                    library = pipeline_dir
                elif library not in LOADABLE_CLASSES:
                    library = not_compiled_module.__module__

                # retrieve class_name
                class_name = not_compiled_module.__class__.__name__

                register_dict = {name: (library, class_name)}

            # save model index config
            self.register_to_config(**register_dict)

            # set models
            setattr(self, name, module)
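
    # Illustrative sketch (hypothetical subclass): a pipeline's __init__ usually
    # forwards its components here, e.g.
    #     class MyPipeline(DiffusionPipeline):
    #         def __init__(self, unet, scheduler):
    #             super().__init__()
    #             self.register_modules(unet=unet, scheduler=scheduler)
    # which records ("diffusers", "UNet2DModel")-style entries in model_index.json.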

    def __setattr__(self, name: str, value: Any):
        if name in self.__dict__ and hasattr(self.config, name):
            # We need to overwrite the config if name exists in config
            if isinstance(getattr(self.config, name), (tuple, list)):
                if value is not None and self.config[name][0] is not None:
                    class_library_tuple = (value.__module__.split(".")[0], value.__class__.__name__)
                else:
                    class_library_tuple = (None, None)

                self.register_to_config(**{name: class_library_tuple})
            else:
                self.register_to_config(**{name: value})

        super().__setattr__(name, value)
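
    # Illustrative effect: assigning a registered component, e.g.
    #     pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
    # also rewrites the (library, class_name) entry in the pipeline config, so the
    # swap survives a save_pretrained / from_pretrained round trip.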

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        """
        Save all saveable variables of the pipeline to a directory. A pipeline variable can be saved and loaded if its
        class implements both save and load methods. The pipeline is easily reloaded using the
        [`~DiffusionPipeline.from_pretrained`] class method.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save a pipeline to. Will be created if it doesn't exist.
            safe_serialization (`bool`, *optional*, defaults to `False`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
            variant (`str`, *optional*):
                If specified, weights are saved in the format `pytorch_model.<variant>.bin`.
        """
        model_index_dict = dict(self.config)
        model_index_dict.pop("_class_name", None)
        model_index_dict.pop("_diffusers_version", None)
        model_index_dict.pop("_module", None)
        model_index_dict.pop("_name_or_path", None)

        expected_modules, optional_kwargs = self._get_signature_keys(self)

        def is_saveable_module(name, value):
            if name not in expected_modules:
                return False
            if name in self._optional_components and value[0] is None:
                return False
            return True

        model_index_dict = {k: v for k, v in model_index_dict.items() if is_saveable_module(k, v)}
        for pipeline_component_name in model_index_dict.keys():
            sub_model = getattr(self, pipeline_component_name)
            model_cls = sub_model.__class__

            # Dynamo wraps the original model in a private class.
            # I didn't find a public API to get the original class.
            if is_compiled_module(sub_model):
                sub_model = sub_model._orig_mod
                model_cls = sub_model.__class__

            save_method_name = None
            # search for the model's base class in LOADABLE_CLASSES
            for library_name, library_classes in LOADABLE_CLASSES.items():
                if library_name in sys.modules:
                    library = importlib.import_module(library_name)
                else:
                    logger.info(
                        f"{library_name} is not installed. Cannot save {pipeline_component_name} as {library_classes} from {library_name}"
                    )
                    continue

                for base_class, save_load_methods in library_classes.items():
                    class_candidate = getattr(library, base_class, None)
                    if class_candidate is not None and issubclass(model_cls, class_candidate):
                        # if we found a suitable base class in LOADABLE_CLASSES then grab its save method
                        save_method_name = save_load_methods[0]
                        break
                if save_method_name is not None:
                    break

            if save_method_name is None:
                logger.warning(f"self.{pipeline_component_name}={sub_model} of type {type(sub_model)} cannot be saved.")
                # make sure that we don't try to load unsaveable components afterward
                self.register_to_config(**{pipeline_component_name: (None, None)})
                continue

            save_method = getattr(sub_model, save_method_name)

            # Call the save method with the argument safe_serialization only if it's supported
            save_method_signature = inspect.signature(save_method)
            save_method_accept_safe = "safe_serialization" in save_method_signature.parameters
            save_method_accept_variant = "variant" in save_method_signature.parameters

            save_kwargs = {}
            if save_method_accept_safe:
                save_kwargs["safe_serialization"] = safe_serialization
            if save_method_accept_variant:
                save_kwargs["variant"] = variant

            save_method(os.path.join(save_directory, pipeline_component_name), **save_kwargs)

        # finally save the config
        self.save_config(save_directory)
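
    # Illustrative usage (assumes a loaded pipeline `pipe`):
    #     pipe.save_pretrained("./my_pipeline", safe_serialization=True, variant="fp16")
    # writes one subfolder per component plus model_index.json, with weight files
    # named like diffusion_pytorch_model.fp16.safetensors.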

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
        silence_dtype_warnings: bool = False,
    ):
        if torch_device is None and torch_dtype is None:
            return self

        # throw warning if pipeline is in "offloaded"-mode but user tries to manually set to GPU.
        def module_is_sequentially_offloaded(module):
            if not is_accelerate_available() or is_accelerate_version("<", "0.14.0"):
                return False

            return hasattr(module, "_hf_hook") and not isinstance(
                module._hf_hook, (accelerate.hooks.CpuOffload, accelerate.hooks.AlignDevicesHook)
            )

        def module_is_offloaded(module):
            if not is_accelerate_available() or is_accelerate_version("<", "0.17.0.dev0"):
                return False

            return hasattr(module, "_hf_hook") and isinstance(module._hf_hook, accelerate.hooks.CpuOffload)

        # .to("cuda") would raise an error if the pipeline is sequentially offloaded, so we raise our own to make it clearer
        pipeline_is_sequentially_offloaded = any(
            module_is_sequentially_offloaded(module) for _, module in self.components.items()
        )
        if pipeline_is_sequentially_offloaded and torch.device(torch_device).type == "cuda":
            raise ValueError(
                "It seems like you have activated sequential model offloading by calling `enable_sequential_cpu_offload`, but are now attempting to move the pipeline to GPU. This is not compatible with offloading. Please, move your pipeline `.to('cpu')` or consider removing the move altogether if you use sequential offloading."
            )

        # Display a warning in this case (the operation succeeds but the benefits are lost)
        pipeline_is_offloaded = any(module_is_offloaded(module) for _, module in self.components.items())
        if pipeline_is_offloaded and torch.device(torch_device).type == "cuda":
            logger.warning(
                f"It seems like you have activated model offloading by calling `enable_model_cpu_offload`, but are now manually moving the pipeline to GPU. It is strongly recommended against doing so as memory gains from offloading are likely to be lost. Offloading automatically takes care of moving the individual components {', '.join(self.components.keys())} to GPU when needed. To make sure offloading works as expected, you should consider moving the pipeline back to CPU: `pipeline.to('cpu')` or removing the move altogether if you use offloading."
            )

        module_names, _ = self._get_signature_keys(self)
        modules = [getattr(self, n, None) for n in module_names]
        modules = [m for m in modules if isinstance(m, torch.nn.Module)]

        is_offloaded = pipeline_is_offloaded or pipeline_is_sequentially_offloaded
        for module in modules:
            is_loaded_in_8bit = hasattr(module, "is_loaded_in_8bit") and module.is_loaded_in_8bit

            if is_loaded_in_8bit and torch_dtype is not None:
                logger.warning(
                    f"The module '{module.__class__.__name__}' has been loaded in 8bit and conversion to {torch_dtype} is not yet supported. Module is still in 8bit precision."
                )

            if is_loaded_in_8bit and torch_device is not None:
                logger.warning(
                    f"The module '{module.__class__.__name__}' has been loaded in 8bit and moving it to {torch_dtype} via `.to()` is not yet supported. Module is still on {module.device}."
                )
            else:
                module.to(torch_device, torch_dtype)

            if (
                module.dtype == torch.float16
                and str(torch_device) in ["cpu"]
                and not silence_dtype_warnings
                and not is_offloaded
            ):
                logger.warning(
                    "Pipelines loaded with `torch_dtype=torch.float16` cannot run with `cpu` device. It"
                    " is not recommended to move them to `cpu` as running them will fail. Please make"
                    " sure to use an accelerator to run the pipeline in inference, due to the lack of"
                    " support for`float16` operations on this device in PyTorch. Please, remove the"
                    " `torch_dtype=torch.float16` argument, or use another device for inference."
                )
        return self
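
    # Illustrative usage: pipe.to("cuda") moves every torch.nn.Module component to
    # the GPU, pipe.to(torch_dtype=torch.float16) casts them instead, and
    # pipe.to("cuda", torch.float16) does both in one call.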

    @property
    def device(self) -> torch.device:
        r"""
        Returns:
            `torch.device`: The torch device on which the pipeline is located.
        """
        module_names, _ = self._get_signature_keys(self)
        modules = [getattr(self, n, None) for n in module_names]
        modules = [m for m in modules if isinstance(m, torch.nn.Module)]

        for module in modules:
            return module.device

        return torch.device("cpu")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
        r"""
        Instantiate a PyTorch diffusion pipeline from pretrained pipeline weights.

        The pipeline is set in evaluation mode (`model.eval()`) by default.

        If you get the error message below, you need to finetune the weights for your downstream task:

        ```
        Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:
        - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated
        You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
        ```

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:

                    - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline
                      hosted on the Hub.
                    - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights
                      saved using [`~DiffusionPipeline.save_pretrained`].
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model with another dtype. If "auto" is passed, the
                dtype is automatically derived from the model's weights.
            custom_pipeline (`str`, *optional*):

                <Tip warning={true}>

                🧪 This is an experimental feature and may change in the future.

                </Tip>

                Can be either:

                    - A string, the *repo id* (for example `hf-internal-testing/diffusers-dummy-pipeline`) of a custom
                      pipeline hosted on the Hub. The repository must contain a file called pipeline.py that defines
                      the custom pipeline.
                    - A string, the *file name* of a community pipeline hosted on GitHub under
                      [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file
                      names must match the file name and not the pipeline script (`clip_guided_stable_diffusion`
                      instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the
                      current main branch of GitHub.
                    - A path to a directory (`./my_pipeline_directory/`) containing a custom pipeline. The directory
                      must contain a file called `pipeline.py` that defines the custom pipeline.

                For more information on how to load and create custom pipelines, please have a look at [Loading and
                Adding Custom
                Pipelines](https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview)

            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info(`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            custom_revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id similar to
                `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a
                custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub.
            mirror (`str`, *optional*):
                Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not
                guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
                information.
            device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
                A map that specifies where each submodule should go. It doesn’t need to be defined for each
                parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the
                same device.

                Set `device_map="auto"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For
                more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            max_memory (`Dict`, *optional*):
                A dictionary device identifier for the maximum memory. Will default to the maximum memory available for
                each GPU and the available CPU RAM if unset.
            offload_folder (`str` or `os.PathLike`, *optional*):
                The path to offload weights if device_map contains the value `"disk"`.
            offload_state_dict (`bool`, *optional*):
                If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if
                the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`
                when there is some disk offload.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by only loading the pretrained weights and not initializing the weights. This
                tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
                Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the safetensors weights are downloaded if they're available **and** if the
                safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
                weights. If set to `False`, safetensors weights are not loaded.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline
                class). The overwritten components are passed directly to the pipelines `__init__` method. See example
                below for more information.
            variant (`str`, *optional*):
                Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when
                loading `from_flax`.

        <Tip>

        To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with
        `huggingface-cli login`.

        </Tip>

        Examples:

        ```py
        >>> from diffusers import DiffusionPipeline

        >>> # Download pipeline from huggingface.co and cache.
        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")

        >>> # Download pipeline that requires an authorization token
        >>> # For more information on access tokens, please refer to [this section
        >>> # of the documentation](https://huggingface.co/docs/hub/security-tokens)
        >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

        >>> # Use a different scheduler
        >>> from diffusers import LMSDiscreteScheduler

        >>> scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config)
        >>> pipeline.scheduler = scheduler
        ```
        """
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        from_flax = kwargs.pop("from_flax", False)
        torch_dtype = kwargs.pop("torch_dtype", None)
        custom_pipeline = kwargs.pop("custom_pipeline", None)
        custom_revision = kwargs.pop("custom_revision", None)
        provider = kwargs.pop("provider", None)
        sess_options = kwargs.pop("sess_options", None)
        device_map = kwargs.pop("device_map", None)
        max_memory = kwargs.pop("max_memory", None)
        offload_folder = kwargs.pop("offload_folder", None)
        offload_state_dict = kwargs.pop("offload_state_dict", False)
        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
        variant = kwargs.pop("variant", None)
        use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)

        # 1. Download the checkpoints and configs
        # use snapshot download here so that hub repos are first fully downloaded and
        # `from_pretrained` can then load everything from the resulting local folder
        if not os.path.isdir(pretrained_model_name_or_path):
            cached_folder = cls.download(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                force_download=force_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                from_flax=from_flax,
                use_safetensors=use_safetensors,
                custom_pipeline=custom_pipeline,
                custom_revision=custom_revision,
                variant=variant,
                **kwargs,
            )
        else:
            cached_folder = pretrained_model_name_or_path

        config_dict = cls.load_config(cached_folder)

        # pop out "_ignore_files" as it is only needed for download
        config_dict.pop("_ignore_files", None)

        # 2. Define which model components should load variants
        # We retrieve the information by matching whether variant
        # model checkpoints exist in the subfolders
        model_variants = {}
        if variant is not None:
            for folder in os.listdir(cached_folder):
                folder_path = os.path.join(cached_folder, folder)
                is_folder = os.path.isdir(folder_path) and folder in config_dict
                variant_exists = is_folder and any(
                    p.split(".")[1].startswith(variant) for p in os.listdir(folder_path)
                )
                if variant_exists:
                    model_variants[folder] = variant

        # 3. Load the pipeline class, if using custom module then load it from the hub
        # if we load from explicit class, let's use it
        pipeline_class = _get_pipeline_class(
            cls, config_dict, custom_pipeline=custom_pipeline, cache_dir=cache_dir, revision=custom_revision
        )

        # DEPRECATED: To be removed in 1.0.0
        if pipeline_class.__name__ == "StableDiffusionInpaintPipeline" and version.parse(
            version.parse(config_dict["_diffusers_version"]).base_version
        ) <= version.parse("0.5.1"):
            from diffusers import StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy

            pipeline_class = StableDiffusionInpaintPipelineLegacy

            deprecation_message = (
                "You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the"
                f" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For"
                " better inpainting results, we strongly suggest using Stable Diffusion's official inpainting"
                " checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your"
                f" checkpoint {pretrained_model_name_or_path} to the format of"
                " https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain"
                " the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0."
            )
            deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", deprecation_message, standard_warn=False)

        # 4. Define expected modules given pipeline signature
        # and define non-None initialized modules (=`init_kwargs`)

        # some modules can be passed directly to the init
        # in this case they are already instantiated in `kwargs`
        # extract them here
        expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class)
        passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}
        passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs}

        init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs)

        # define init kwargs
        init_kwargs = {k: init_dict.pop(k) for k in optional_kwargs if k in init_dict}
        init_kwargs = {**init_kwargs, **passed_pipe_kwargs}

        # remove `null` components
        def load_module(name, value):
            if value[0] is None:
                return False
            if name in passed_class_obj and passed_class_obj[name] is None:
                return False
            return True

        init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)}

        # Special case: safety_checker must be loaded separately when using `from_flax`
        if from_flax and "safety_checker" in init_dict and "safety_checker" not in passed_class_obj:
            raise NotImplementedError(
                "The safety checker cannot be automatically loaded when loading weights `from_flax`."
                " Please, pass `safety_checker=None` to `from_pretrained`, and load the safety checker"
                " separately if you need it."
            )

        # 5. Throw nice warnings / errors for fast accelerate loading
        if len(unused_kwargs) > 0:
            logger.warning(
                f"Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored."
            )

        if low_cpu_mem_usage and not is_accelerate_available():
            low_cpu_mem_usage = False
            logger.warning(
                "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
                " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
                " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
                " install accelerate\n```\n."
            )

        if device_map is not None and not is_torch_version(">=", "1.9.0"):
            raise NotImplementedError(
                "Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set"
                " `device_map=None`."
            )

        if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
            raise NotImplementedError(
                "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
                " `low_cpu_mem_usage=False`."
            )

        if low_cpu_mem_usage is False and device_map is not None:
            raise ValueError(
                f"You cannot set `low_cpu_mem_usage` to False while using device_map={device_map} for loading and"
                " dispatching. Please make sure to set `low_cpu_mem_usage=True`."
            )

        # import it here to avoid circular import
        from diffusers import pipelines

        # 6. Load each module in the pipeline
        for name, (library_name, class_name) in tqdm(init_dict.items(), desc="Loading pipeline components..."):
            # 6.1 - now that JAX/Flax is an official framework of the library, we might load from Flax names
            if class_name.startswith("Flax"):
                class_name = class_name[4:]

            # 6.2 Define all importable classes
            is_pipeline_module = hasattr(pipelines, library_name)
            importable_classes = ALL_IMPORTABLE_CLASSES
            loaded_sub_model = None

            # 6.3 Use passed sub model or load class_name from library_name
            if name in passed_class_obj:
                # if the model is in a pipeline module, then we load it from the pipeline
                # check that passed_class_obj has correct parent class
                maybe_raise_or_warn(
                    library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module
                )

                loaded_sub_model = passed_class_obj[name]
            else:
                # load sub model
                loaded_sub_model = load_sub_model(
                    library_name=library_name,
                    class_name=class_name,
                    importable_classes=importable_classes,
                    pipelines=pipelines,
                    is_pipeline_module=is_pipeline_module,
                    pipeline_class=pipeline_class,
                    torch_dtype=torch_dtype,
                    provider=provider,
                    sess_options=sess_options,
                    device_map=device_map,
                    max_memory=max_memory,
                    offload_folder=offload_folder,
                    offload_state_dict=offload_state_dict,
                    model_variants=model_variants,
                    name=name,
                    from_flax=from_flax,
                    variant=variant,
                    low_cpu_mem_usage=low_cpu_mem_usage,
                    cached_folder=cached_folder,
                )
                logger.info(
                    f"Loaded {name} as {class_name} from `{name}` subfolder of {pretrained_model_name_or_path}."
                )

            init_kwargs[name] = loaded_sub_model  # UNet(...), # DiffusionSchedule(...)

        # 7. Potentially add passed objects if expected
        missing_modules = set(expected_modules) - set(init_kwargs.keys())
        passed_modules = list(passed_class_obj.keys())
        optional_modules = pipeline_class._optional_components
        if len(missing_modules) > 0 and missing_modules <= set(passed_modules + optional_modules):
            for module in missing_modules:
                init_kwargs[module] = passed_class_obj.get(module, None)
        elif len(missing_modules) > 0:
            passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs
            raise ValueError(
                f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed."
            )

        # 8. Instantiate the pipeline
        model = pipeline_class(**init_kwargs)

        # 9. Save where the model was instantiated from
        model.register_to_config(_name_or_path=pretrained_model_name_or_path)
        return model

    @property
    def name_or_path(self) -> Optional[str]:
        return getattr(self.config, "_name_or_path", None)

    @property
    def _execution_device(self):
        r"""
        Returns the device on which the pipeline's models will be executed. After calling
        [`~DiffusionPipeline.enable_sequential_cpu_offload`] the execution device can only be inferred from
        Accelerate's module hooks.
        """
        for name, model in self.components.items():
            if not isinstance(model, torch.nn.Module) or name in self._exclude_from_cpu_offload:
                continue

            if not hasattr(model, "_hf_hook"):
                return self.device
            for module in model.modules():
                if (
                    hasattr(module, "_hf_hook")
                    and hasattr(module._hf_hook, "execution_device")
                    and module._hf_hook.execution_device is not None
                ):
                    return torch.device(module._hf_hook.execution_device)
        return self.device

    def enable_sequential_cpu_offload(self, gpu_id: int = 0, device: Union[torch.device, str] = "cuda"):
        r"""
        Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
        text_encoder, vae and safety checker have their state dicts saved to CPU and then are moved to a
        `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
        Note that offloading happens on a submodule basis. Memory savings are higher than with
        `enable_model_cpu_offload`, but performance is lower.
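
        Examples:

        A minimal sketch, assuming a CUDA device, `accelerate>=0.14.0`, and the
        `runwayml/stable-diffusion-v1-5` checkpoint:

        ```py
        >>> import torch
        >>> from diffusers import StableDiffusionPipeline

        >>> pipe = StableDiffusionPipeline.from_pretrained(
        ...     "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
        ... )
        >>> pipe.enable_sequential_cpu_offload()
        >>> # submodules are moved to the GPU one by one as their `forward` is called
        >>> image = pipe("an astronaut riding a horse").images[0]
        ```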
        """
        if is_accelerate_available() and is_accelerate_version(">=", "0.14.0"):
            from accelerate import cpu_offload
        else:
            raise ImportError("`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher")

        if device == "cuda":
            device = torch.device(f"{device}:{gpu_id}")

        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            device_mod = getattr(torch, self.device.type, None)
            if hasattr(device_mod, "empty_cache") and device_mod.is_available():
                device_mod.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)

        for name, model in self.components.items():
            if not isinstance(model, torch.nn.Module):
                continue

            if name in self._exclude_from_cpu_offload:
                model.to(device)
            else:
                # make sure to offload buffers if not all high level weights
                # are of type nn.Module
                offload_buffers = len(model._parameters) > 0
                cpu_offload(model, device, offload_buffers=offload_buffers)

    @classmethod
    def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:
        r"""
        Download and cache a PyTorch diffusion pipeline from pretrained pipeline weights.

        Parameters:
            pretrained_model_name (`str` or `os.PathLike`, *optional*):
                A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline
                hosted on the Hub.
            custom_pipeline (`str`, *optional*):
                Can be either:

                    - A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained
                      pipeline hosted on the Hub. The repository must contain a file called `pipeline.py` that defines
                      the custom pipeline.

                    - A string, the *file name* of a community pipeline hosted on GitHub under
                      [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file
                      names must match the file name and not the pipeline script (`clip_guided_stable_diffusion`
                      instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the
                      current `main` branch of GitHub.

                    - A path to a *directory* (`./my_pipeline_directory/`) containing a custom pipeline. The directory
                      must contain a file called `pipeline.py` that defines the custom pipeline.

                <Tip warning={true}>

                🧪 This is an experimental feature and may change in the future.

                </Tip>

                For more information on how to load and create custom pipelines, take a look at [How to contribute a
                community pipeline](https://huggingface.co/docs/diffusers/main/en/using-diffusers/contribute_pipeline).

            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            custom_revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id similar to
                `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a
                custom pipeline from GitHub, otherwise it defaults to `"main"` when loading from the Hub.
            mirror (`str`, *optional*):
                Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
                guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
                information.
            variant (`str`, *optional*):
                Load weights from a specified variant filename such as `"fp16"` or `"ema"`. This is ignored when
                loading `from_flax`.

        Returns:
            `os.PathLike`:
                A path to the downloaded pipeline.

        <Tip>

        To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with
        `huggingface-cli login`.

        </Tip>
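
        Examples:

        A minimal sketch that fetches (but does not load) a pipeline snapshot and returns the local
        cache path; the repository id is only an example:

        ```py
        >>> from diffusers import DiffusionPipeline

        >>> cached_folder = DiffusionPipeline.download("CompVis/ldm-text2im-large-256")
        ```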

        """
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        from_flax = kwargs.pop("from_flax", False)
        custom_pipeline = kwargs.pop("custom_pipeline", None)
        custom_revision = kwargs.pop("custom_revision", None)
        variant = kwargs.pop("variant", None)
        use_safetensors = kwargs.pop("use_safetensors", None)

        if use_safetensors and not is_safetensors_available():
            raise ValueError(
                "`use_safetensors`=True but safetensors is not installed. Please install safetensors with `pip install safetensors`."
            )

        allow_pickle = False
        if use_safetensors is None:
            use_safetensors = is_safetensors_available()
            allow_pickle = True

        pipeline_is_cached = False
        allow_patterns = None
        ignore_patterns = None

        if not local_files_only:
            try:
                info = model_info(
                    pretrained_model_name,
                    use_auth_token=use_auth_token,
                    revision=revision,
                )
            except HTTPError as e:
                logger.warning(f"Couldn't connect to the Hub: {e}.\nWill try to load from local cache.")
                local_files_only = True

        if not local_files_only:
            config_file = hf_hub_download(
                pretrained_model_name,
                cls.config_name,
                cache_dir=cache_dir,
                revision=revision,
                proxies=proxies,
                force_download=force_download,
                resume_download=resume_download,
                use_auth_token=use_auth_token,
            )

            config_dict = cls._dict_from_json_file(config_file)

            ignore_filenames = config_dict.pop("_ignore_files", [])

            # retrieve all folder_names that contain relevant files
            folder_names = [k for k, v in config_dict.items() if isinstance(v, list)]

            filenames = {sibling.rfilename for sibling in info.siblings}
            model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant)

            if len(variant_filenames) == 0 and variant is not None:
                deprecation_message = (
                    f"You are trying to load the model files of the `variant={variant}`, but no such modeling files are available."
                    f"The default model files: {model_filenames} will be loaded instead. Make sure to not load from `variant={variant}`"
                    "if such variant modeling files are not available. Doing so will lead to an error in v0.22.0 as defaulting to non-variant"
                    "modeling files is deprecated."
                )
                deprecate("no variant default", "0.22.0", deprecation_message, standard_warn=False)

            # remove ignored filenames
            model_filenames = set(model_filenames) - set(ignore_filenames)
            variant_filenames = set(variant_filenames) - set(ignore_filenames)

            # warn the user if a deprecated revision (e.g. a variant name such as `fp16`) was passed as `revision`
            if revision in DEPRECATED_REVISION_ARGS and version.parse(
                version.parse(__version__).base_version
            ) >= version.parse("0.20.0"):
                warn_deprecated_model_variant(
                    pretrained_model_name, use_auth_token, variant, revision, model_filenames
                )

            model_folder_names = {os.path.split(f)[0] for f in model_filenames}

            # all filenames compatible with variant will be added
            allow_patterns = list(model_filenames)

            # allow all patterns from non-model folders
            # this enables downloading schedulers, tokenizers, ...
            allow_patterns += [f"{k}/*" for k in folder_names if k not in model_folder_names]
            # also allow downloading config.json files with the model
            allow_patterns += [os.path.join(k, "config.json") for k in model_folder_names]

            allow_patterns += [
                SCHEDULER_CONFIG_NAME,
                CONFIG_NAME,
                cls.config_name,
                CUSTOM_PIPELINE_FILE_NAME,
            ]

            # retrieve passed components that should not be downloaded
            pipeline_class = _get_pipeline_class(
                cls, config_dict, custom_pipeline=custom_pipeline, cache_dir=cache_dir, revision=custom_revision
            )
            expected_components, _ = cls._get_signature_keys(pipeline_class)
            passed_components = [k for k in expected_components if k in kwargs]

            if (
                use_safetensors
                and not allow_pickle
                and not is_safetensors_compatible(
                    model_filenames, variant=variant, passed_components=passed_components
                )
            ):
                raise EnvironmentError(
                    f"Could not find the necessary `safetensors` weights in {model_filenames} (variant={variant})"
                )
            if from_flax:
                ignore_patterns = ["*.bin", "*.safetensors", "*.onnx", "*.pb"]
            elif use_safetensors and is_safetensors_compatible(
                model_filenames, variant=variant, passed_components=passed_components
            ):
                ignore_patterns = ["*.bin", "*.msgpack"]

                safetensors_variant_filenames = {f for f in variant_filenames if f.endswith(".safetensors")}
                safetensors_model_filenames = {f for f in model_filenames if f.endswith(".safetensors")}
                if (
                    len(safetensors_variant_filenames) > 0
                    and safetensors_model_filenames != safetensors_variant_filenames
                ):
                    logger.warning(
                        f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(safetensors_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(safetensors_model_filenames - safetensors_variant_filenames)}]\nIf this behavior is not expected, please check your folder structure."
                    )
            else:
                ignore_patterns = ["*.safetensors", "*.msgpack"]

                bin_variant_filenames = {f for f in variant_filenames if f.endswith(".bin")}
                bin_model_filenames = {f for f in model_filenames if f.endswith(".bin")}
                if len(bin_variant_filenames) > 0 and bin_model_filenames != bin_variant_filenames:
                    logger.warning(
                        f"\nA mixture of {variant} and non-{variant} filenames will be loaded.\nLoaded {variant} filenames:\n[{', '.join(bin_variant_filenames)}]\nLoaded non-{variant} filenames:\n[{', '.join(bin_model_filenames - bin_variant_filenames)}]\nIf this behavior is not expected, please check your folder structure."
                    )

            # Don't download any objects that are passed
            allow_patterns = [
                p for p in allow_patterns if not (len(p.split("/")) == 2 and p.split("/")[0] in passed_components)
            ]
            # Don't download index files of forbidden patterns either
            ignore_patterns = ignore_patterns + [f"{i}.index.*json" for i in ignore_patterns]

            re_ignore_pattern = [re.compile(fnmatch.translate(p)) for p in ignore_patterns]
            re_allow_pattern = [re.compile(fnmatch.translate(p)) for p in allow_patterns]

            expected_files = [f for f in filenames if not any(p.match(f) for p in re_ignore_pattern)]
            expected_files = [f for f in expected_files if any(p.match(f) for p in re_allow_pattern)]

            snapshot_folder = Path(config_file).parent
            pipeline_is_cached = all((snapshot_folder / f).is_file() for f in expected_files)

            if pipeline_is_cached and not force_download:
                # if the pipeline is cached, we can directly return it
                # else call snapshot_download
                return snapshot_folder

        user_agent = {"pipeline_class": cls.__name__}
        if custom_pipeline is not None and not custom_pipeline.endswith(".py"):
            user_agent["custom_pipeline"] = custom_pipeline

        # download all allow_patterns - ignore_patterns
        cached_folder = snapshot_download(
            pretrained_model_name,
            cache_dir=cache_dir,
            resume_download=resume_download,
            proxies=proxies,
            local_files_only=local_files_only,
            use_auth_token=use_auth_token,
            revision=revision,
            allow_patterns=allow_patterns,
            ignore_patterns=ignore_patterns,
            user_agent=user_agent,
        )

        return cached_folder

    @staticmethod
    def _get_signature_keys(obj):
        parameters = inspect.signature(obj.__init__).parameters
        required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}
        optional_parameters = {k for k, v in parameters.items() if v.default != inspect._empty}
        expected_modules = set(required_parameters.keys()) - {"self"}
        return expected_modules, optional_parameters

    @property
    def components(self) -> Dict[str, Any]:
        r"""
        The `self.components` property can be useful to run different pipelines with the same weights and
        configurations without reallocating additional memory.

        Returns (`dict`):
            A dictionary containing all the modules needed to initialize the pipeline.

        Examples:

        ```py
        >>> from diffusers import (
        ...     StableDiffusionPipeline,
        ...     StableDiffusionImg2ImgPipeline,
        ...     StableDiffusionInpaintPipeline,
        ... )

        >>> text2img = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
        >>> inpaint = StableDiffusionInpaintPipeline(**text2img.components)
        ```
        """
        expected_modules, optional_parameters = self._get_signature_keys(self)
        components = {
            k: getattr(self, k) for k in self.config.keys() if not k.startswith("_") and k not in optional_parameters
        }

        if set(components.keys()) != expected_modules:
            raise ValueError(
                f"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected"
                f" {expected_modules} to be defined, but {components.keys()} are defined."
            )

        return components

    @staticmethod
    def numpy_to_pil(images):
        """
        Convert a NumPy image or a batch of images to a PIL image.
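
        Examples:

        A small sketch with an arbitrary random batch; `images` is expected to have shape
        `(batch, height, width, 3)` with float values in `[0, 1]`:

        ```py
        >>> import numpy as np

        >>> images = np.random.rand(1, 64, 64, 3)
        >>> pil_images = DiffusionPipeline.numpy_to_pil(images)
        ```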
        """
        return numpy_to_pil(images)

    def progress_bar(self, iterable=None, total=None):
        if not hasattr(self, "_progress_bar_config"):
            self._progress_bar_config = {}
        elif not isinstance(self._progress_bar_config, dict):
            raise ValueError(
                f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}."
            )

        if iterable is not None:
            return tqdm(iterable, **self._progress_bar_config)
        elif total is not None:
            return tqdm(total=total, **self._progress_bar_config)
        else:
            raise ValueError("Either `total` or `iterable` has to be defined.")

    def set_progress_bar_config(self, **kwargs):
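        r"""
        Set the keyword arguments forwarded to `tqdm` by [`~DiffusionPipeline.progress_bar`]. For
        example, `pipe.set_progress_bar_config(disable=True)` silences the per-step progress bar.
        """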
        self._progress_bar_config = kwargs

    def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):
        r"""
        Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).

        When this option is enabled, you should observe lower GPU memory usage and a potential speed up during
        inference. Speed up during training is not guaranteed.

        <Tip warning={true}>

        ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes
        precedent.

        </Tip>

        Parameters:
            attention_op (`Callable`, *optional*):
                Override the default `None` operator for use as `op` argument to the
                [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention)
                function of xFormers.

        Examples:

        ```py
        >>> import torch
        >>> from diffusers import DiffusionPipeline
        >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp

        >>> pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16)
        >>> pipe = pipe.to("cuda")
        >>> pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)
        >>> # Workaround for not accepting attention shape using VAE for Flash Attention
        >>> pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None)
        ```
        """
        self.set_use_memory_efficient_attention_xformers(True, attention_op)

    def disable_xformers_memory_efficient_attention(self):
        r"""
        Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).
        """
        self.set_use_memory_efficient_attention_xformers(False)

    def set_use_memory_efficient_attention_xformers(
        self, valid: bool, attention_op: Optional[Callable] = None
    ) -> None:
        # Recursively walk through all the children.
        # Any children which exposes the set_use_memory_efficient_attention_xformers method
        # gets the message
        def fn_recursive_set_mem_eff(module: torch.nn.Module):
            if hasattr(module, "set_use_memory_efficient_attention_xformers"):
                module.set_use_memory_efficient_attention_xformers(valid, attention_op)

            for child in module.children():
                fn_recursive_set_mem_eff(child)

        module_names, _ = self._get_signature_keys(self)
        modules = [getattr(self, n, None) for n in module_names]
        modules = [m for m in modules if isinstance(m, torch.nn.Module)]

        for module in modules:
            fn_recursive_set_mem_eff(module)

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module splits the input tensor into slices to compute attention in
        several steps. This is useful to save some memory in exchange for a small speed decrease.

        Args:
            slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
                When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
                `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
                provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
                must be a multiple of `slice_size`.
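
        Examples:

        A minimal sketch, assuming the `runwayml/stable-diffusion-v1-5` checkpoint:

        ```py
        >>> from diffusers import StableDiffusionPipeline

        >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> pipe.enable_attention_slicing()
        ```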
        """
        self.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        r"""
        Disable sliced attention computation. If `enable_attention_slicing` was previously called, attention is
        computed in one step.
        """
        # set slice_size = `None` to disable `attention slicing`
        self.enable_attention_slicing(None)

    def set_attention_slice(self, slice_size: Optional[int]):
        module_names, _ = self._get_signature_keys(self)
        modules = [getattr(self, n, None) for n in module_names]
        modules = [m for m in modules if isinstance(m, torch.nn.Module) and hasattr(m, "set_attention_slice")]

        for module in modules:
            module.set_attention_slice(slice_size)