"src/array/cuda/negative_sampling.hip" did not exist on "81831111a5553f7c749d094a159dd748c39e6f28"
pipeline_utils.py 33.6 KB
Newer Older
Patrick von Platen's avatar
Patrick von Platen committed
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
# Copyright (c) 2022, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib
import inspect
import os
from dataclasses import dataclass
from pathlib import Path
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

import diffusers
import PIL
from huggingface_hub import snapshot_download
from packaging import version
from PIL import Image
from tqdm.auto import tqdm

from .configuration_utils import ConfigMixin
from .dynamic_modules_utils import get_class_from_dynamic_module
from .hub_utils import http_user_agent
from .modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT
from .schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
from .utils import (
    CONFIG_NAME,
    DIFFUSERS_CACHE,
    ONNX_WEIGHTS_NAME,
    WEIGHTS_NAME,
    BaseOutput,
    deprecate,
    is_accelerate_available,
    is_torch_version,
    is_transformers_available,
    logging,
)


if is_transformers_available():
    import transformers
    from transformers import PreTrainedModel


INDEX_FILE = "diffusion_pytorch_model.bin"
CUSTOM_PIPELINE_FILE_NAME = "pipeline.py"
DUMMY_MODULES_FOLDER = "diffusers.utils"
TRANSFORMERS_DUMMY_MODULES_FOLDER = "transformers.utils"


logger = logging.get_logger(__name__)


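# For each library, map the base classes a pipeline component may inherit from to the
# [save method name, load method name] pair used to serialize and restore that component.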
LOADABLE_CLASSES = {
    "diffusers": {
        "ModelMixin": ["save_pretrained", "from_pretrained"],
        "SchedulerMixin": ["save_pretrained", "from_pretrained"],
        "DiffusionPipeline": ["save_pretrained", "from_pretrained"],
        "OnnxRuntimeModel": ["save_pretrained", "from_pretrained"],
    },
    "transformers": {
        "PreTrainedTokenizer": ["save_pretrained", "from_pretrained"],
        "PreTrainedTokenizerFast": ["save_pretrained", "from_pretrained"],
        "PreTrainedModel": ["save_pretrained", "from_pretrained"],
        "FeatureExtractionMixin": ["save_pretrained", "from_pretrained"],
        "ProcessorMixin": ["save_pretrained", "from_pretrained"],
        "ImageProcessingMixin": ["save_pretrained", "from_pretrained"],
    },
    "onnxruntime.training": {
        "ORTModule": ["save_pretrained", "from_pretrained"],
    },
}

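# Flatten the mapping so a class can be looked up without knowing which library it comes from.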
ALL_IMPORTABLE_CLASSES = {}
for library in LOADABLE_CLASSES:
    ALL_IMPORTABLE_CLASSES.update(LOADABLE_CLASSES[library])


@dataclass
class ImagePipelineOutput(BaseOutput):
    """
    Output class for image pipelines.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size` or numpy array of shape `(batch_size, height, width,
            num_channels)`. PIL images or numpy array present the denoised images of the diffusion pipeline.
    """

    images: Union[List[PIL.Image.Image], np.ndarray]


@dataclass
class AudioPipelineOutput(BaseOutput):
    """
    Output class for audio pipelines.

    Args:
        audios (`np.ndarray`)
            List of denoised samples of shape `(batch_size, num_channels, sample_rate)`. Numpy array present the
            denoised audio samples of the diffusion pipeline.
    """

    audios: np.ndarray


class DiffusionPipeline(ConfigMixin):
    r"""
    Base class for all pipelines.

    [`DiffusionPipeline`] takes care of storing all components (models, schedulers, processors) for diffusion pipelines
    and handles methods for loading, downloading and saving models as well as a few methods common to all pipelines to:

        - move all PyTorch modules to the device of your choice
        - enable/disable the progress bar for the denoising iteration

    Class attributes:

        - **config_name** ([`str`]) -- name of the config file that will store the class and module names of all
          components of the diffusion pipeline.
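
    Examples:

    A minimal usage sketch (assuming a CUDA device is available):

    ```py
    >>> from diffusers import DiffusionPipeline

    >>> pipe = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
    >>> pipe = pipe.to("cuda")  # `to` returns the pipeline itself
    >>> pipe.set_progress_bar_config(disable=True)  # silence the denoising progress bar
    ```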
    """
    config_name = "model_index.json"

    def register_modules(self, **kwargs):
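        r"""
        Register the passed components with the pipeline: each keyword argument is set as an attribute on
        the pipeline and its `(library, class name)` pair is recorded in the config so that
        `save_pretrained` and `from_pretrained` can serialize and restore the component.
        """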
        # import it here to avoid circular import
        from diffusers import pipelines

        for name, module in kwargs.items():
            # retrieve library
            if module is None:
                register_dict = {name: (None, None)}
            else:
                library = module.__module__.split(".")[0]

                # check if the module is a pipeline module
                pipeline_dir = module.__module__.split(".")[-2] if len(module.__module__.split(".")) > 2 else None
                path = module.__module__.split(".")
                is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir)

                # if library is not in LOADABLE_CLASSES, then it is a custom module.
                # Or if it's a pipeline module, then the module is inside the pipeline
                # folder so we set the library to module name.
                if library not in LOADABLE_CLASSES or is_pipeline_module:
                    library = pipeline_dir

                # retrieve class_name
                class_name = module.__class__.__name__

                register_dict = {name: (library, class_name)}

            # save model index config
            self.register_to_config(**register_dict)

            # set models
            setattr(self, name, module)

    def save_pretrained(self, save_directory: Union[str, os.PathLike]):
        """
        Save all variables of the pipeline that can be saved and loaded as well as the pipeline's configuration file
        to a directory. A pipeline variable can be saved and loaded if its class implements both a save and a load
        method. The pipeline can then easily be re-loaded using the [`~DiffusionPipeline.from_pretrained`] class
        method.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to which to save. Will be created if it doesn't exist.
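
        Examples:

        A minimal round-trip sketch (`./ldm_pipeline` is a hypothetical local path):

        ```py
        >>> from diffusers import DiffusionPipeline

        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")
        >>> pipeline.save_pretrained("./ldm_pipeline")
        >>> pipeline = DiffusionPipeline.from_pretrained("./ldm_pipeline")
        ```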
        """
        self.save_config(save_directory)

        model_index_dict = dict(self.config)
        model_index_dict.pop("_class_name")
        model_index_dict.pop("_diffusers_version")
        model_index_dict.pop("_module", None)

        for pipeline_component_name in model_index_dict.keys():
            sub_model = getattr(self, pipeline_component_name)
            if sub_model is None:
                # edge case for saving a pipeline with safety_checker=None
                continue

            model_cls = sub_model.__class__

            save_method_name = None
            # search for the model's base class in LOADABLE_CLASSES
            for library_name, library_classes in LOADABLE_CLASSES.items():
                library = importlib.import_module(library_name)
                for base_class, save_load_methods in library_classes.items():
                    class_candidate = getattr(library, base_class, None)
                    if class_candidate is not None and issubclass(model_cls, class_candidate):
                        # if we found a suitable base class in LOADABLE_CLASSES then grab its save method
                        save_method_name = save_load_methods[0]
                        break
                if save_method_name is not None:
                    break

            save_method = getattr(sub_model, save_method_name)
            save_method(os.path.join(save_directory, pipeline_component_name))

    def to(self, torch_device: Optional[Union[str, torch.device]] = None):
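        r"""
        Move every `torch.nn.Module` component of the pipeline to `torch_device` and return the pipeline.
        Passing `None` leaves the pipeline unchanged.
        """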
        if torch_device is None:
            return self

        module_names, _, _ = self.extract_init_dict(dict(self.config))
        for name in module_names.keys():
            module = getattr(self, name)
            if isinstance(module, torch.nn.Module):
                if module.dtype == torch.float16 and str(torch_device) in ["cpu"]:
                    logger.warning(
                        "Pipelines loaded with `torch_dtype=torch.float16` cannot run with `cpu` device. It"
                        " is not recommended to move them to `cpu` as running them will fail. Please make"
                        " sure to use an accelerator to run the pipeline in inference, due to the lack of"
                        " support for`float16` operations on this device in PyTorch. Please, remove the"
                        " `torch_dtype=torch.float16` argument, or use another device for inference."
                    )
                module.to(torch_device)
        return self

    @property
    def device(self) -> torch.device:
        r"""
        Returns:
            `torch.device`: The torch device on which the pipeline is located.
        """
        module_names, _, _ = self.extract_init_dict(dict(self.config))
        for name in module_names.keys():
            module = getattr(self, name)
            if isinstance(module, torch.nn.Module):
                return module.device
        return torch.device("cpu")

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):
        r"""
        Instantiate a PyTorch diffusion pipeline from pre-trained pipeline weights.

        The pipeline is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated).

        The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come
        pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
        task.

        The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those
        weights are discarded.

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:

                    - A string, the *repo id* of a pretrained pipeline hosted inside a model repo on
                      https://huggingface.co/ Valid repo ids have to be located under a user or organization name, like
                      `CompVis/ldm-text2im-large-256`.
                    - A path to a *directory* containing pipeline weights saved using
                      [`~DiffusionPipeline.save_pretrained`], e.g., `./my_pipeline_directory/`.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model under this dtype. If `"auto"` is passed the dtype
                will be automatically derived from the model's weights.
            custom_pipeline (`str`, *optional*):

                <Tip warning={true}>

                    This is an experimental feature and is likely to change in the future.

                </Tip>

                Can be either:

                    - A string, the *repo id* of a custom pipeline hosted inside a model repo on
                      https://huggingface.co/. Valid repo ids have to be located under a user or organization name,
                      like `hf-internal-testing/diffusers-dummy-pipeline`.

                        <Tip>

                         It is required that the model repo has a file, called `pipeline.py` that defines the custom
                         pipeline.

                        </Tip>

                    - A string, the *file name* of a community pipeline hosted on GitHub under
                      https://github.com/huggingface/diffusers/tree/main/examples/community. Valid file names have to
                      match exactly the file name without `.py` located under the above link, *e.g.*
                      `clip_guided_stable_diffusion`.

                        <Tip>

                         Community pipelines are always loaded from the current `main` branch of GitHub.

                        </Tip>

                    - A path to a *directory* containing a custom pipeline, e.g., `./my_pipeline_directory/`.

                        <Tip>

                         It is required that the directory has a file, called `pipeline.py` that defines the custom
                         pipeline.

                        </Tip>

                For more information on how to load and create custom pipelines, please have a look at [Loading and
                Adding Custom
                Pipelines](https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview)

            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to delete incompletely received files. Will attempt to resume the download if such a
                file exists.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            output_loading_info (`bool`, *optional*, defaults to `False`):
                Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether or not to only look at local files (i.e., do not try to download the model).
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated
                when running `huggingface-cli login` (stored in `~/.huggingface`).
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
                git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
                identifier allowed by git.
            mirror (`str`, *optional*):
                Mirror source to accelerate downloads in China. If you are from China and have an accessibility
                problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety
                of the mirror. Please refer to the mirror site for more information.
            device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):
                A map that specifies where each submodule should go. It doesn't need to be refined to each
                parameter/buffer name, once a given module name is inside, every submodule of it will be sent to the
                same device.

                To have Accelerate compute the most optimized `device_map` automatically, set `device_map="auto"`. For
                more information about each option see [designing a device
                map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by not initializing the weights and only loading the pre-trained weights. This
                also tries to not use more than 1x model size in CPU memory (including peak memory) while loading the
                model. This is only supported when torch version >= 1.9.0. If you are using an older version of torch,
                setting this argument to `True` will raise an error.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite the loadable and saveable variables, *i.e.* the pipeline components, of the
                specific pipeline class. The overwritten components are then directly passed to the pipeline's
                `__init__` method. See the example below for more information.

        <Tip>

         It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated
         models](https://huggingface.co/docs/hub/models-gated#gated-models), *e.g.* `"runwayml/stable-diffusion-v1-5"`.

        </Tip>

        <Tip>

        Activate the special ["offline-mode"](https://huggingface.co/diffusers/installation.html#offline-mode) to use
        this method in a firewalled environment.

        </Tip>

        Examples:

        ```py
        >>> from diffusers import DiffusionPipeline

        >>> # Download pipeline from huggingface.co and cache.
        >>> pipeline = DiffusionPipeline.from_pretrained("CompVis/ldm-text2im-large-256")

        >>> # Download a pipeline that requires an authorization token
        >>> # For more information on access tokens, please refer to this section of the
        >>> # documentation: https://huggingface.co/docs/hub/security-tokens
        >>> pipeline = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

        >>> # Use a different scheduler
        >>> from diffusers import LMSDiscreteScheduler

        >>> scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config)
        >>> pipeline.scheduler = scheduler
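
        >>> # Load with a custom pipeline class from the Hub (a sketch: the dummy repo id is taken from the
        >>> # `custom_pipeline` description above, and `google/ddpm-cifar10-32` is an assumed base checkpoint)
        >>> pipeline = DiffusionPipeline.from_pretrained(
        ...     "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
        ... )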
        ```
        """
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        torch_dtype = kwargs.pop("torch_dtype", None)
        custom_pipeline = kwargs.pop("custom_pipeline", None)
        provider = kwargs.pop("provider", None)
        sess_options = kwargs.pop("sess_options", None)
        device_map = kwargs.pop("device_map", None)
        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)

        if low_cpu_mem_usage and not is_accelerate_available():
            low_cpu_mem_usage = False
            logger.warning(
                "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
                " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
                " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
                " install accelerate\n```\n."
            )

        if device_map is not None and not is_torch_version(">=", "1.9.0"):
            raise NotImplementedError(
                "Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set"
                " `device_map=None`."
            )

        if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
            raise NotImplementedError(
                "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
                " `low_cpu_mem_usage=False`."
            )

        if low_cpu_mem_usage is False and device_map is not None:
            raise ValueError(
                f"You cannot set `low_cpu_mem_usage` to False while using device_map={device_map} for loading and"
                " dispatching. Please make sure to set `low_cpu_mem_usage=True`."
            )

        # 1. Download the checkpoints and configs
        # use snapshot download here to get it working from `from_pretrained`
        if not os.path.isdir(pretrained_model_name_or_path):
            config_dict = cls.load_config(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                force_download=force_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
            )
            # make sure we only download sub-folders and `diffusers` filenames
            folder_names = [k for k in config_dict.keys() if not k.startswith("_")]
            allow_patterns = [os.path.join(k, "*") for k in folder_names]
            allow_patterns += [WEIGHTS_NAME, SCHEDULER_CONFIG_NAME, CONFIG_NAME, ONNX_WEIGHTS_NAME, cls.config_name]

            # make sure we don't download flax weights
            ignore_patterns = "*.msgpack"

            if custom_pipeline is not None:
                allow_patterns += [CUSTOM_PIPELINE_FILE_NAME]

            if cls != DiffusionPipeline:
                requested_pipeline_class = cls.__name__
            else:
                requested_pipeline_class = config_dict.get("_class_name", cls.__name__)
            user_agent = {"pipeline_class": requested_pipeline_class}
            if custom_pipeline is not None:
                user_agent["custom_pipeline"] = custom_pipeline
            user_agent = http_user_agent(user_agent)

            # download all allow_patterns
            cached_folder = snapshot_download(
                pretrained_model_name_or_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                allow_patterns=allow_patterns,
                ignore_patterns=ignore_patterns,
                user_agent=user_agent,
            )
        else:
            cached_folder = pretrained_model_name_or_path

        config_dict = cls.load_config(cached_folder)

        # 2. Load the pipeline class, if using custom module then load it from the hub
        # if we load from explicit class, let's use it
        if custom_pipeline is not None:
            if custom_pipeline.endswith(".py"):
                path = Path(custom_pipeline)
                # decompose into folder & file
                file_name = path.name
                custom_pipeline = path.parent.absolute()
            else:
                file_name = CUSTOM_PIPELINE_FILE_NAME

            pipeline_class = get_class_from_dynamic_module(
                custom_pipeline, module_file=file_name, cache_dir=custom_pipeline
            )
        elif cls != DiffusionPipeline:
            pipeline_class = cls
        else:
            diffusers_module = importlib.import_module(cls.__module__.split(".")[0])
            pipeline_class = getattr(diffusers_module, config_dict["_class_name"])

        # To be removed in 1.0.0
        if pipeline_class.__name__ == "StableDiffusionInpaintPipeline" and version.parse(
            version.parse(config_dict["_diffusers_version"]).base_version
        ) <= version.parse("0.5.1"):
            from diffusers import StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy

            pipeline_class = StableDiffusionInpaintPipelineLegacy

            deprecation_message = (
                "You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the"
                f" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For"
                " better inpainting results, we strongly suggest using Stable Diffusion's official inpainting"
                " checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your"
                f" checkpoint {pretrained_model_name_or_path} to the format of"
                " https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain"
                " the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0."
            )
            deprecate("StableDiffusionInpaintPipelineLegacy", "1.0.0", deprecation_message, standard_warn=False)

        # some modules can be passed directly to the init
        # in this case they are already instantiated in `kwargs`
        # extract them here
        expected_modules = set(inspect.signature(pipeline_class.__init__).parameters.keys()) - set(["self"])
        passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}

        init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs)

        if len(unused_kwargs) > 0:
            logger.warning(f"Keyword arguments {unused_kwargs} not recognized.")

        init_kwargs = {}

        # import it here to avoid circular import
        from diffusers import pipelines

        # 3. Load each module in the pipeline
        for name, (library_name, class_name) in init_dict.items():
            if class_name is None:
                # edge case for when the pipeline was saved with safety_checker=None
                init_kwargs[name] = None
                continue

            # 3.1 - now that JAX/Flax is an official framework of the library, we might load from Flax names
            if class_name.startswith("Flax"):
                class_name = class_name[4:]

            is_pipeline_module = hasattr(pipelines, library_name)
            loaded_sub_model = None
            sub_model_should_be_defined = True

            # if the model is in a pipeline module, then we load it from the pipeline
            if name in passed_class_obj:
                # 1. check that passed_class_obj has correct parent class
                if not is_pipeline_module and passed_class_obj[name] is not None:
                    library = importlib.import_module(library_name)
                    class_obj = getattr(library, class_name)
                    importable_classes = LOADABLE_CLASSES[library_name]
                    class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}

                    expected_class_obj = None
                    for class_name, class_candidate in class_candidates.items():
                        if class_candidate is not None and issubclass(class_obj, class_candidate):
                            expected_class_obj = class_candidate

                    if not issubclass(passed_class_obj[name].__class__, expected_class_obj):
                        raise ValueError(
                            f"{passed_class_obj[name]} is of type: {type(passed_class_obj[name])}, but should be"
                            f" {expected_class_obj}"
                        )
                elif passed_class_obj[name] is None:
                    logger.warning(
                        f"You have passed `None` for {name} to disable its functionality in {pipeline_class}. Note"
                        f" that this might lead to problems when using {pipeline_class} and is not recommended."
                    )
                    sub_model_should_be_defined = False
                else:
                    logger.warning(
                        f"You have passed a non-standard module {passed_class_obj[name]}. We cannot verify whether it"
                        " has the correct type"
                    )

                # set passed class object
                loaded_sub_model = passed_class_obj[name]
            elif is_pipeline_module:
                pipeline_module = getattr(pipelines, library_name)
                class_obj = getattr(pipeline_module, class_name)
                importable_classes = ALL_IMPORTABLE_CLASSES
                class_candidates = {c: class_obj for c in importable_classes.keys()}
            else:
                # else we just import it from the library.
                library = importlib.import_module(library_name)

                class_obj = getattr(library, class_name)
                importable_classes = LOADABLE_CLASSES[library_name]
                class_candidates = {c: getattr(library, c, None) for c in importable_classes.keys()}

            if loaded_sub_model is None and sub_model_should_be_defined:
                load_method_name = None
                for class_name, class_candidate in class_candidates.items():
                    if class_candidate is not None and issubclass(class_obj, class_candidate):
                        load_method_name = importable_classes[class_name][1]

                if load_method_name is None:
                    none_module = class_obj.__module__
                    is_dummy_path = none_module.startswith(DUMMY_MODULES_FOLDER) or none_module.startswith(
                        TRANSFORMERS_DUMMY_MODULES_FOLDER
                    )
                    if is_dummy_path and "dummy" in none_module:
                        # call class_obj for nice error message of missing requirements
                        class_obj()

                    raise ValueError(
                        f"The component {class_obj} of {pipeline_class} cannot be loaded as it does not seem to have"
                        f" any of the loading methods defined in {ALL_IMPORTABLE_CLASSES}."
                    )

                load_method = getattr(class_obj, load_method_name)
                loading_kwargs = {}

                if issubclass(class_obj, torch.nn.Module):
                    loading_kwargs["torch_dtype"] = torch_dtype
                if issubclass(class_obj, diffusers.OnnxRuntimeModel):
                    loading_kwargs["provider"] = provider
                    loading_kwargs["sess_options"] = sess_options

                is_diffusers_model = issubclass(class_obj, diffusers.ModelMixin)
                is_transformers_model = (
                    is_transformers_available()
                    and issubclass(class_obj, PreTrainedModel)
                    and version.parse(version.parse(transformers.__version__).base_version) >= version.parse("4.20.0")
                )

                # When loading a transformers model, if the device_map is None, the weights will be initialized as opposed to diffusers.
                # To make default loading faster we set the `low_cpu_mem_usage=low_cpu_mem_usage` flag which is `True` by default.
                # This makes sure that the weights won't be initialized which significantly speeds up loading.
                if is_diffusers_model or is_transformers_model:
                    loading_kwargs["device_map"] = device_map
                    loading_kwargs["low_cpu_mem_usage"] = low_cpu_mem_usage

                # check if the module is in a subdirectory
                if os.path.isdir(os.path.join(cached_folder, name)):
                    loaded_sub_model = load_method(os.path.join(cached_folder, name), **loading_kwargs)
                else:
                    # else load from the root directory
                    loaded_sub_model = load_method(cached_folder, **loading_kwargs)

            init_kwargs[name] = loaded_sub_model  # e.g. a UNet model or a scheduler instance

        # 4. Potentially add passed objects if expected
        missing_modules = set(expected_modules) - set(init_kwargs.keys())
        if len(missing_modules) > 0 and missing_modules <= set(passed_class_obj.keys()):
            for module in missing_modules:
                init_kwargs[module] = passed_class_obj[module]
        elif len(missing_modules) > 0:
            passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys()))
            raise ValueError(
                f"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed."
            )

        # 5. Instantiate the pipeline
        model = pipeline_class(**init_kwargs)
        return model

    @property
    def components(self) -> Dict[str, Any]:
        r"""

        The `self.components` property can be useful to run different pipelines with the same weights and
        configurations without reallocating additional memory.

        Examples:

        ```py
        >>> from diffusers import (
        ...     StableDiffusionPipeline,
        ...     StableDiffusionImg2ImgPipeline,
        ...     StableDiffusionInpaintPipeline,
        ... )

        >>> text2img = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
        >>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
        >>> inpaint = StableDiffusionInpaintPipeline(**text2img.components)
        ```

        Returns:
            A dictionary containing all the modules needed to initialize the pipeline.
        """
        components = {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}
        expected_modules = set(inspect.signature(self.__init__).parameters.keys()) - set(["self"])

        if set(components.keys()) != expected_modules:
            raise ValueError(
                f"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected"
                f" {expected_modules} to be defined, but {components} are defined."
            )

        return components

    @staticmethod
    def numpy_to_pil(images):
        """
        Convert a numpy image or a batch of images to a PIL image.
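
        A minimal sketch of the expected input (inferred from the code below: float values in `[0, 1]`, with
        shape `(batch, height, width, channels)` or a single `(height, width, channels)` image):

        ```py
        >>> import numpy as np

        >>> images = np.random.rand(2, 64, 64, 3)  # two RGB images with values in [0, 1]
        >>> pil_images = DiffusionPipeline.numpy_to_pil(images)
        ```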
        """
        if images.ndim == 3:
            images = images[None, ...]
        images = (images * 255).round().astype("uint8")
        if images.shape[-1] == 1:
            # special case for grayscale (single channel) images
            pil_images = [Image.fromarray(image.squeeze(), mode="L") for image in images]
        else:
            pil_images = [Image.fromarray(image) for image in images]

        return pil_images

    def progress_bar(self, iterable):
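        # Wrap the iterable in a tqdm progress bar, configured with the kwargs stored by
        # `set_progress_bar_config` below.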
        if not hasattr(self, "_progress_bar_config"):
            self._progress_bar_config = {}
        elif not isinstance(self._progress_bar_config, dict):
            raise ValueError(
                f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}."
            )

        return tqdm(iterable, **self._progress_bar_config)

    def set_progress_bar_config(self, **kwargs):
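        # The kwargs are forwarded verbatim to `tqdm` in `progress_bar` above; for example (a usage
        # sketch), `pipe.set_progress_bar_config(disable=True)` silences the denoising progress bar.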
        self._progress_bar_config = kwargs