# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from collections import defaultdict
from contextlib import nullcontext
from io import BytesIO
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union

import requests
import torch
import torch.nn.functional as F
from huggingface_hub import hf_hub_download
from torch import nn

from .utils import (
    DIFFUSERS_CACHE,
    HF_HUB_OFFLINE,
    _get_model_file,
    deprecate,
    is_accelerate_available,
    is_omegaconf_available,
    is_safetensors_available,
    is_transformers_available,
    logging,
)
from .utils.import_utils import BACKENDS_MAPPING


if is_safetensors_available():
    import safetensors

if is_transformers_available():
    from transformers import CLIPTextModel, CLIPTextModelWithProjection, PreTrainedModel, PreTrainedTokenizer

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.utils import set_module_tensor_to_device


logger = logging.get_logger(__name__)

TEXT_ENCODER_NAME = "text_encoder"
UNET_NAME = "unet"

LORA_WEIGHT_NAME = "pytorch_lora_weights.bin"
LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors"

TEXT_INVERSION_NAME = "learned_embeds.bin"
TEXT_INVERSION_NAME_SAFE = "learned_embeds.safetensors"

CUSTOM_DIFFUSION_WEIGHT_NAME = "pytorch_custom_diffusion_weights.bin"
CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE = "pytorch_custom_diffusion_weights.safetensors"


class PatchedLoraProjection(nn.Module):
    def __init__(self, regular_linear_layer, lora_scale=1, network_alpha=None, rank=4, dtype=None):
        super().__init__()
        from .models.attention_processor import LoRALinearLayer

        self.regular_linear_layer = regular_linear_layer

        device = self.regular_linear_layer.weight.device

        if dtype is None:
            dtype = self.regular_linear_layer.weight.dtype

        self.lora_linear_layer = LoRALinearLayer(
            self.regular_linear_layer.in_features,
            self.regular_linear_layer.out_features,
            network_alpha=network_alpha,
            device=device,
            dtype=dtype,
            rank=rank,
        )

        self.lora_scale = lora_scale

    def forward(self, input):
        return self.regular_linear_layer(input) + self.lora_scale * self.lora_linear_layer(input)
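
    # Usage sketch (illustrative, not part of the public API): wrap an existing
    # `nn.Linear` so the frozen projection and the trainable low-rank update run
    # side by side:
    #
    #     base = nn.Linear(768, 768)
    #     patched = PatchedLoraProjection(base, lora_scale=0.5, rank=4)
    #     out = patched(torch.randn(1, 768))  # base(x) + 0.5 * lora_linear_layer(x)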


def text_encoder_attn_modules(text_encoder):
    attn_modules = []

    if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
        for i, layer in enumerate(text_encoder.text_model.encoder.layers):
            name = f"text_model.encoder.layers.{i}.self_attn"
            mod = layer.self_attn
            attn_modules.append((name, mod))
    else:
        raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")

    return attn_modules


def text_encoder_lora_state_dict(text_encoder):
    state_dict = {}
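    # The produced keys take the form
    # "text_model.encoder.layers.{i}.self_attn.{q,k,v,out}_proj.lora_linear_layer.{up,down}.weight",
    # the layout `load_lora_into_text_encoder` consumes once any "text_encoder." prefix is stripped.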

    for name, module in text_encoder_attn_modules(text_encoder):
        for k, v in module.q_proj.lora_linear_layer.state_dict().items():
            state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v

        for k, v in module.k_proj.lora_linear_layer.state_dict().items():
            state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v

        for k, v in module.v_proj.lora_linear_layer.state_dict().items():
            state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v

        for k, v in module.out_proj.lora_linear_layer.state_dict().items():
            state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v

    return state_dict


class AttnProcsLayers(torch.nn.Module):
    def __init__(self, state_dict: Dict[str, torch.Tensor]):
        super().__init__()
        self.layers = torch.nn.ModuleList(state_dict.values())
        self.mapping = dict(enumerate(state_dict.keys()))
        self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}

        # .processor for unet, .self_attn for text encoder
        self.split_keys = [".processor", ".self_attn"]

        # we add a hook to state_dict() and load_state_dict() so that the
        # naming fits with `unet.attn_processors`
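        # e.g. "layers.0.to_q_lora.down.weight" becomes
        # "down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight"
        # (the exact processor path is illustrative).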
        def map_to(module, state_dict, *args, **kwargs):
            new_state_dict = {}
            for key, value in state_dict.items():
                num = int(key.split(".")[1])  # 0 is always "layers"
                new_key = key.replace(f"layers.{num}", module.mapping[num])
                new_state_dict[new_key] = value

            return new_state_dict

        def remap_key(key, state_dict):
            for k in self.split_keys:
                if k in key:
                    return key.split(k)[0] + k

            raise ValueError(
                f"There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}."
            )

        def map_from(module, state_dict, *args, **kwargs):
            all_keys = list(state_dict.keys())
            for key in all_keys:
                replace_key = remap_key(key, state_dict)
                new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}")
                state_dict[new_key] = state_dict[key]
                del state_dict[key]

        self._register_state_dict_hook(map_to)
        self._register_load_state_dict_pre_hook(map_from, with_module=True)


class UNet2DConditionLoadersMixin:
    text_encoder_name = TEXT_ENCODER_NAME
    unet_name = UNET_NAME

    def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
        r"""
        Load pretrained attention processor layers into [`UNet2DConditionModel`]. Attention processor layers have to be
        defined in
        [`cross_attention.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py)
        and be a `torch.nn.Module` class.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the model id (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a directory (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            mirror (`str`, *optional*):
                Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
                guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
                information.
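
        Example (a minimal sketch; the LoRA repository id is illustrative, any Hub repository or
        local directory containing `pytorch_lora_weights.bin` or its `.safetensors` variant works
        the same way):

        ```py
        from diffusers import StableDiffusionPipeline
        import torch

        pipe = StableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
        ).to("cuda")
        pipe.unet.load_attn_procs("sayakpaul/sd-model-finetuned-lora-t4")
        ```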

        """
        from .models.attention_processor import (
            AttnAddedKVProcessor,
            AttnAddedKVProcessor2_0,
            CustomDiffusionAttnProcessor,
            LoRAAttnAddedKVProcessor,
            LoRAAttnProcessor,
            LoRAAttnProcessor2_0,
            LoRAXFormersAttnProcessor,
            SlicedAttnAddedKVProcessor,
            XFormersAttnProcessor,
        )

        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", None)
        weight_name = kwargs.pop("weight_name", None)
        use_safetensors = kwargs.pop("use_safetensors", None)
        # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
        # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        network_alpha = kwargs.pop("network_alpha", None)

        if use_safetensors and not is_safetensors_available():
            raise ValueError(
                "`use_safetensors`=True but safetensors is not installed. Please install safetensors with `pip install safetensors`."
            )

        allow_pickle = False
        if use_safetensors is None:
            use_safetensors = is_safetensors_available()
            allow_pickle = True

        user_agent = {
            "file_type": "attn_procs_weights",
            "framework": "pytorch",
        }

        model_file = None
        if not isinstance(pretrained_model_name_or_path_or_dict, dict):
            # Let's first try to load .safetensors weights
            if (use_safetensors and weight_name is None) or (
                weight_name is not None and weight_name.endswith(".safetensors")
            ):
                try:
                    model_file = _get_model_file(
                        pretrained_model_name_or_path_or_dict,
                        weights_name=weight_name or LORA_WEIGHT_NAME_SAFE,
                        cache_dir=cache_dir,
                        force_download=force_download,
                        resume_download=resume_download,
                        proxies=proxies,
                        local_files_only=local_files_only,
                        use_auth_token=use_auth_token,
                        revision=revision,
                        subfolder=subfolder,
                        user_agent=user_agent,
                    )
                    state_dict = safetensors.torch.load_file(model_file, device="cpu")
                except IOError as e:
                    if not allow_pickle:
                        raise e
                    # try loading non-safetensors weights
                    pass

            if model_file is None:
                model_file = _get_model_file(
                    pretrained_model_name_or_path_or_dict,
                    weights_name=weight_name or LORA_WEIGHT_NAME,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    subfolder=subfolder,
                    user_agent=user_agent,
                )
                state_dict = torch.load(model_file, map_location="cpu")
        else:
            state_dict = pretrained_model_name_or_path_or_dict

        # fill attn processors
        attn_processors = {}

        is_lora = all("lora" in k for k in state_dict.keys())
        is_custom_diffusion = any("custom_diffusion" in k for k in state_dict.keys())

        if is_lora:
            is_new_lora_format = all(
                key.startswith(self.unet_name) or key.startswith(self.text_encoder_name) for key in state_dict.keys()
            )
            if is_new_lora_format:
                # Strip the `"unet"` prefix.
                is_text_encoder_present = any(key.startswith(self.text_encoder_name) for key in state_dict.keys())
                if is_text_encoder_present:
                    warn_message = "The state_dict contains LoRA params corresponding to the text encoder which are not being used here. To use both UNet and text encoder related LoRA params, use [`pipe.load_lora_weights()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.load_lora_weights)."
                    warnings.warn(warn_message)
                unet_keys = [k for k in state_dict.keys() if k.startswith(self.unet_name)]
                state_dict = {k.replace(f"{self.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys}

            lora_grouped_dict = defaultdict(dict)
            for key, value in state_dict.items():
                attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:])
                lora_grouped_dict[attn_processor_key][sub_key] = value

            for key, value_dict in lora_grouped_dict.items():
                rank = value_dict["to_k_lora.down.weight"].shape[0]
                hidden_size = value_dict["to_k_lora.up.weight"].shape[0]

                attn_processor = self
                for sub_key in key.split("."):
                    attn_processor = getattr(attn_processor, sub_key)

                if isinstance(
                    attn_processor, (AttnAddedKVProcessor, SlicedAttnAddedKVProcessor, AttnAddedKVProcessor2_0)
                ):
                    cross_attention_dim = value_dict["add_k_proj_lora.down.weight"].shape[1]
                    attn_processor_class = LoRAAttnAddedKVProcessor
                else:
                    cross_attention_dim = value_dict["to_k_lora.down.weight"].shape[1]
                    if isinstance(attn_processor, (XFormersAttnProcessor, LoRAXFormersAttnProcessor)):
                        attn_processor_class = LoRAXFormersAttnProcessor
                    else:
                        attn_processor_class = (
                            LoRAAttnProcessor2_0 if hasattr(F, "scaled_dot_product_attention") else LoRAAttnProcessor
                        )

                attn_processors[key] = attn_processor_class(
                    hidden_size=hidden_size,
                    cross_attention_dim=cross_attention_dim,
                    rank=rank,
                    network_alpha=network_alpha,
                )
                attn_processors[key].load_state_dict(value_dict)
        elif is_custom_diffusion:
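            # Custom Diffusion stores plain (full-rank) projection weights per attention
            # processor; an empty entry marks a processor that was left untrained.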
            custom_diffusion_grouped_dict = defaultdict(dict)
            for key, value in state_dict.items():
                if len(value) == 0:
                    custom_diffusion_grouped_dict[key] = {}
                else:
                    if "to_out" in key:
                        attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:])
                    else:
                        attn_processor_key, sub_key = ".".join(key.split(".")[:-2]), ".".join(key.split(".")[-2:])
                    custom_diffusion_grouped_dict[attn_processor_key][sub_key] = value

            for key, value_dict in custom_diffusion_grouped_dict.items():
                if len(value_dict) == 0:
                    attn_processors[key] = CustomDiffusionAttnProcessor(
                        train_kv=False, train_q_out=False, hidden_size=None, cross_attention_dim=None
                    )
                else:
                    cross_attention_dim = value_dict["to_k_custom_diffusion.weight"].shape[1]
                    hidden_size = value_dict["to_k_custom_diffusion.weight"].shape[0]
                    train_q_out = "to_q_custom_diffusion.weight" in value_dict
                    attn_processors[key] = CustomDiffusionAttnProcessor(
                        train_kv=True,
                        train_q_out=train_q_out,
                        hidden_size=hidden_size,
                        cross_attention_dim=cross_attention_dim,
                    )
                    attn_processors[key].load_state_dict(value_dict)
        else:
            raise ValueError(
                f"{model_file} does not seem to be in the correct format expected by LoRA or Custom Diffusion training."
            )

        # set correct dtype & device
        attn_processors = {k: v.to(device=self.device, dtype=self.dtype) for k, v in attn_processors.items()}

        # set layers
        self.set_attn_processor(attn_processors)

    def save_attn_procs(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        weight_name: str = None,
        save_function: Callable = None,
        safe_serialization: bool = False,
        **kwargs,
    ):
        r"""
        Save an attention processor to a directory so that it can be reloaded using the
        [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`] method.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save an attention processor to. Will be created if it doesn't exist.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training when
                you need to call this function on all processes. In this case, set `is_main_process=True` only on the
                main process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
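
        Example (a minimal sketch, assuming `pipe.unet` currently carries trained attention
        processors, e.g. LoRA layers set during fine-tuning; the directory name is illustrative):

        ```py
        pipe.unet.save_attn_procs("./lora-weights", safe_serialization=True)
        ```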

        """
        from .models.attention_processor import (
            CustomDiffusionAttnProcessor,
            CustomDiffusionXFormersAttnProcessor,
        )

        weight_name = weight_name or deprecate(
            "weights_name",
            "0.20.0",
            "`weights_name` is deprecated, please use `weight_name` instead.",
            take_from=kwargs,
        )
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        if save_function is None:
            if safe_serialization:

                def save_function(weights, filename):
                    return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"})

            else:
                save_function = torch.save

        os.makedirs(save_directory, exist_ok=True)

        is_custom_diffusion = any(
            isinstance(x, (CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor))
            for (_, x) in self.attn_processors.items()
        )
        if is_custom_diffusion:
            model_to_save = AttnProcsLayers(
                {
                    y: x
                    for (y, x) in self.attn_processors.items()
                    if isinstance(x, (CustomDiffusionAttnProcessor, CustomDiffusionXFormersAttnProcessor))
                }
            )
            state_dict = model_to_save.state_dict()
            for name, attn in self.attn_processors.items():
                if len(attn.state_dict()) == 0:
                    state_dict[name] = {}
        else:
            model_to_save = AttnProcsLayers(self.attn_processors)
            state_dict = model_to_save.state_dict()

        if weight_name is None:
            if safe_serialization:
                weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE if is_custom_diffusion else LORA_WEIGHT_NAME_SAFE
            else:
                weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME if is_custom_diffusion else LORA_WEIGHT_NAME

        # Save the model
        save_function(state_dict, os.path.join(save_directory, weight_name))
        logger.info(f"Model weights saved in {os.path.join(save_directory, weight_name)}")


class TextualInversionLoaderMixin:
    r"""
    Load textual inversion tokens and embeddings to the tokenizer and text encoder.
    """

    def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):
        r"""
        Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to
        be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
        inversion token or if the textual inversion token is a single vector, the input prompt is returned.

        Parameters:
            prompt (`str` or list of `str`):
                The prompt or prompts to guide the image generation.
            tokenizer (`PreTrainedTokenizer`):
                The tokenizer responsible for encoding the prompt into input tokens.

        Returns:
            `str` or list of `str`: The converted prompt
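
        Example (a hedged sketch: assumes "<cat-toy>" was loaded as a three-vector textual
        inversion embedding, so the tokenizer also holds "<cat-toy>_1" and "<cat-toy>_2"):

        ```py
        prompt = pipe.maybe_convert_prompt("A <cat-toy> backpack", pipe.tokenizer)
        # -> "A <cat-toy> <cat-toy>_1 <cat-toy>_2 backpack"
        ```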
        """
        if not isinstance(prompt, List):
            prompts = [prompt]
        else:
            prompts = prompt

        prompts = [self._maybe_convert_prompt(p, tokenizer) for p in prompts]

        if not isinstance(prompt, List):
            return prompts[0]

        return prompts

    def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"):
        r"""
        Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds
        to a multi-vector textual inversion embedding, this function will process the prompt so that the special token
        is replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
        inversion token or a textual inversion token that is a single vector, the input prompt is simply returned.

        Parameters:
            prompt (`str`):
                The prompt to guide the image generation.
            tokenizer (`PreTrainedTokenizer`):
                The tokenizer responsible for encoding the prompt into input tokens.

        Returns:
            `str`: The converted prompt
        """
        tokens = tokenizer.tokenize(prompt)
        unique_tokens = set(tokens)
        for token in unique_tokens:
            if token in tokenizer.added_tokens_encoder:
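                # A multi-vector embedding loaded under "<token>" also registers
                # "<token>_1", "<token>_2", ...; rebuild that full sequence here.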
                replacement = token
                i = 1
                while f"{token}_{i}" in tokenizer.added_tokens_encoder:
                    replacement += f" {token}_{i}"
                    i += 1

                prompt = prompt.replace(token, replacement)

        return prompt

    def load_textual_inversion(
        self,
        pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]],
        token: Optional[Union[str, List[str]]] = None,
        **kwargs,
    ):
        r"""
        Load textual inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both 🤗 Diffusers and
        Automatic1111 formats are supported).

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`):
                Can be either one of the following or a list of them:

                    - A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a
                      pretrained model hosted on the Hub.
                    - A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual
                      inversion weights.
                    - A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights.
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            token (`str` or `List[str]`, *optional*):
                Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a
                list, then `token` must also be a list of equal length.
            weight_name (`str`, *optional*):
                Name of a custom weight file. This should be used when:

                    - The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight
                      name such as `text_inv.bin`.
                    - The saved textual inversion file is in the Automatic1111 format.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            mirror (`str`, *optional*):
                Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
                guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
                information.

        Example:

        To load a textual inversion embedding vector in 🤗 Diffusers format:

        ```py
        from diffusers import StableDiffusionPipeline
        import torch

        model_id = "runwayml/stable-diffusion-v1-5"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

        pipe.load_textual_inversion("sd-concepts-library/cat-toy")

        prompt = "A <cat-toy> backpack"

        image = pipe(prompt, num_inference_steps=50).images[0]
        image.save("cat-backpack.png")
        ```

        To load a textual inversion embedding vector in Automatic1111 format, make sure to download the vector first
        (for example from [civitAI](https://civitai.com/models/3036?modelVersionId=9857)) and then load the vector
        locally:

        ```py
        from diffusers import StableDiffusionPipeline
        import torch

        model_id = "runwayml/stable-diffusion-v1-5"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

        pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2")

        prompt = "charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details."

        image = pipe(prompt, num_inference_steps=50).images[0]
        image.save("character.png")
        ```

        """
        if not hasattr(self, "tokenizer") or not isinstance(self.tokenizer, PreTrainedTokenizer):
            raise ValueError(
                f"{self.__class__.__name__} requires `self.tokenizer` of type `PreTrainedTokenizer` for calling"
                f" `{self.load_textual_inversion.__name__}`"
            )

        if not hasattr(self, "text_encoder") or not isinstance(self.text_encoder, PreTrainedModel):
            raise ValueError(
                f"{self.__class__.__name__} requires `self.text_encoder` of type `PreTrainedModel` for calling"
                f" `{self.load_textual_inversion.__name__}`"
            )

        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", None)
        weight_name = kwargs.pop("weight_name", None)
        use_safetensors = kwargs.pop("use_safetensors", None)

        if use_safetensors and not is_safetensors_available():
            raise ValueError(
                "`use_safetensors`=True but safetensors is not installed. Please install safetensors with `pip install safetensors`."
            )

        allow_pickle = False
        if use_safetensors is None:
            use_safetensors = is_safetensors_available()
            allow_pickle = True

        user_agent = {
            "file_type": "text_inversion",
            "framework": "pytorch",
        }

        if not isinstance(pretrained_model_name_or_path, list):
            pretrained_model_name_or_paths = [pretrained_model_name_or_path]
        else:
            pretrained_model_name_or_paths = pretrained_model_name_or_path

        if isinstance(token, str):
            tokens = [token]
        elif token is None:
            tokens = [None] * len(pretrained_model_name_or_paths)
        else:
            tokens = token

        if len(pretrained_model_name_or_paths) != len(tokens):
            raise ValueError(
                f"You have passed a list of models of length {len(pretrained_model_name_or_paths)}, and list of tokens of length {len(tokens)}"
                f"Make sure both lists have the same length."
            )

        valid_tokens = [t for t in tokens if t is not None]
        if len(set(valid_tokens)) < len(valid_tokens):
            raise ValueError(f"You have passed a list of tokens that contains duplicates: {tokens}")

        token_ids_and_embeddings = []

        for pretrained_model_name_or_path, token in zip(pretrained_model_name_or_paths, tokens):
            if not isinstance(pretrained_model_name_or_path, dict):
                # 1. Load textual inversion file
                model_file = None
                # Let's first try to load .safetensors weights
                if (use_safetensors and weight_name is None) or (
                    weight_name is not None and weight_name.endswith(".safetensors")
                ):
                    try:
                        model_file = _get_model_file(
                            pretrained_model_name_or_path,
                            weights_name=weight_name or TEXT_INVERSION_NAME_SAFE,
                            cache_dir=cache_dir,
                            force_download=force_download,
                            resume_download=resume_download,
                            proxies=proxies,
                            local_files_only=local_files_only,
                            use_auth_token=use_auth_token,
                            revision=revision,
                            subfolder=subfolder,
                            user_agent=user_agent,
                        )
                        state_dict = safetensors.torch.load_file(model_file, device="cpu")
                    except Exception as e:
                        if not allow_pickle:
                            raise e

                        model_file = None

                if model_file is None:
                    model_file = _get_model_file(
                        pretrained_model_name_or_path,
                        weights_name=weight_name or TEXT_INVERSION_NAME,
                        cache_dir=cache_dir,
                        force_download=force_download,
                        resume_download=resume_download,
                        proxies=proxies,
                        local_files_only=local_files_only,
                        use_auth_token=use_auth_token,
                        revision=revision,
                        subfolder=subfolder,
                        user_agent=user_agent,
                    )
                    state_dict = torch.load(model_file, map_location="cpu")
            else:
                state_dict = pretrained_model_name_or_path

            # 2. Load token and embedding correctly from file
            loaded_token = None
            if isinstance(state_dict, torch.Tensor):
                if token is None:
                    raise ValueError(
                        "You are trying to load a textual inversion embedding that has been saved as a PyTorch tensor. Make sure to pass the name of the corresponding token in this case: `token=...`."
                    )
                embedding = state_dict
            elif len(state_dict) == 1:
                # diffusers
                loaded_token, embedding = next(iter(state_dict.items()))
            elif "string_to_param" in state_dict:
                # A1111
                loaded_token = state_dict["name"]
                embedding = state_dict["string_to_param"]["*"]

            if token is not None and loaded_token != token:
                logger.info(f"The loaded token: {loaded_token} is overwritten by the passed token {token}.")
            else:
                token = loaded_token

            embedding = embedding.to(dtype=self.text_encoder.dtype, device=self.text_encoder.device)

            # 3. Make sure we don't mess up the tokenizer or text encoder
            vocab = self.tokenizer.get_vocab()
            if token in vocab:
                raise ValueError(
                    f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder."
                )
            elif f"{token}_1" in vocab:
                multi_vector_tokens = [token]
                i = 1
                while f"{token}_{i}" in self.tokenizer.added_tokens_encoder:
                    multi_vector_tokens.append(f"{token}_{i}")
                    i += 1

                raise ValueError(
                    f"Multi-vector Token {multi_vector_tokens} already in tokenizer vocabulary. Please choose a different token name or remove the {multi_vector_tokens} and embedding from the tokenizer and text encoder."
                )

            is_multi_vector = len(embedding.shape) > 1 and embedding.shape[0] > 1

            if is_multi_vector:
                tokens = [token] + [f"{token}_{i}" for i in range(1, embedding.shape[0])]
                embeddings = [e for e in embedding]  # noqa: C416
            else:
                tokens = [token]
                embeddings = [embedding[0]] if len(embedding.shape) > 1 else [embedding]

            # add tokens and get ids
            self.tokenizer.add_tokens(tokens)
            token_ids = self.tokenizer.convert_tokens_to_ids(tokens)
            token_ids_and_embeddings += zip(token_ids, embeddings)

            logger.info(f"Loaded textual inversion embedding for {token}.")

        # resize token embeddings and set all new embeddings
        self.text_encoder.resize_token_embeddings(len(self.tokenizer))
        for token_id, embedding in token_ids_and_embeddings:
            self.text_encoder.get_input_embeddings().weight.data[token_id] = embedding


class LoraLoaderMixin:
    r"""
    Load LoRA layers into [`UNet2DConditionModel`] and
    [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel).
    """
    text_encoder_name = TEXT_ENCODER_NAME
    unet_name = UNET_NAME

    def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
        """
        Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
        `self.text_encoder`.

        All kwargs are forwarded to `self.lora_state_dict`.

        See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.

        See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into
        `self.unet`.

        See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded
        into `self.text_encoder`.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
            kwargs (`dict`, *optional*):
                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
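
        Example (a minimal sketch; the LoRA repository id is illustrative, any Hub repository or
        local directory containing `pytorch_lora_weights.bin` or its `.safetensors` variant works
        the same way):

        ```py
        from diffusers import StableDiffusionPipeline
        import torch

        pipe = StableDiffusionPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
        ).to("cuda")
        pipe.load_lora_weights("sayakpaul/sd-model-finetuned-lora-t4")
        image = pipe("A picture of a sks dog in a bucket", num_inference_steps=25).images[0]
        ```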
        """
        state_dict, network_alpha = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs)
        self.load_lora_into_unet(state_dict, network_alpha=network_alpha, unet=self.unet)
        self.load_lora_into_text_encoder(
            state_dict, network_alpha=network_alpha, text_encoder=self.text_encoder, lora_scale=self.lora_scale
        )

    @classmethod
    def lora_state_dict(
        cls,
        pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
        **kwargs,
    ):
        r"""
        Return the state dict for the LoRA weights and the network alpha.

        <Tip warning={true}>

        We support loading A1111 formatted LoRA checkpoints in a limited capacity.

        This function is experimental and might change in the future.

        </Tip>

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            mirror (`str`, *optional*):
                Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
                guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
                information.

        """
        # Load the main state dict first which has the LoRA layers for either of
        # UNet and text encoder or both.
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", None)
        weight_name = kwargs.pop("weight_name", None)
        use_safetensors = kwargs.pop("use_safetensors", None)

        if use_safetensors and not is_safetensors_available():
            raise ValueError(
                "`use_safetensors`=True but safetensors is not installed. Please install safetensors with `pip install safetensors`."
            )

        allow_pickle = False
        if use_safetensors is None:
            use_safetensors = is_safetensors_available()
            allow_pickle = True

        user_agent = {
            "file_type": "attn_procs_weights",
            "framework": "pytorch",
        }

        model_file = None
        if not isinstance(pretrained_model_name_or_path_or_dict, dict):
            # Let's first try to load .safetensors weights
            if (use_safetensors and weight_name is None) or (
                weight_name is not None and weight_name.endswith(".safetensors")
            ):
                try:
                    model_file = _get_model_file(
                        pretrained_model_name_or_path_or_dict,
                        weights_name=weight_name or LORA_WEIGHT_NAME_SAFE,
                        cache_dir=cache_dir,
                        force_download=force_download,
                        resume_download=resume_download,
                        proxies=proxies,
                        local_files_only=local_files_only,
                        use_auth_token=use_auth_token,
                        revision=revision,
                        subfolder=subfolder,
                        user_agent=user_agent,
                    )
                    state_dict = safetensors.torch.load_file(model_file, device="cpu")
                except (IOError, safetensors.SafetensorError) as e:
                    if not allow_pickle:
                        raise e
                    # try loading non-safetensors weights
                    pass
            if model_file is None:
                model_file = _get_model_file(
                    pretrained_model_name_or_path_or_dict,
                    weights_name=weight_name or LORA_WEIGHT_NAME,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    subfolder=subfolder,
                    user_agent=user_agent,
                )
                state_dict = torch.load(model_file, map_location="cpu")
        else:
            state_dict = pretrained_model_name_or_path_or_dict

        # Convert kohya-ss Style LoRA attn procs to diffusers attn procs
        network_alpha = None
        if all((k.startswith("lora_te_") or k.startswith("lora_unet_")) for k in state_dict.keys()):
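            # Kohya-ss checkpoints flatten module paths with underscores (keys such as
            # "lora_unet_down_blocks_0_attentions_0_...", shown here illustratively); the
            # converter restores the dotted diffusers naming and extracts the network alpha.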
            state_dict, network_alpha = cls._convert_kohya_lora_to_diffusers(state_dict)

        return state_dict, network_alpha

    @classmethod
    def load_lora_into_unet(cls, state_dict, network_alpha, unet):
        """
        This will load the LoRA layers specified in `state_dict` into `unet`

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the unet or prefixed with an additional `unet` which can be used to distinguish between text
                encoder lora layers.
            network_alpha (`float`):
                See `LoRALinearLayer` for more details.
            unet (`UNet2DConditionModel`):
                The UNet model to load the LoRA layers into.
        """

        # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918),
        # then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as
        # their prefixes.
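        # e.g. a key like "unet.down_blocks.0.attentions.0.transformer_blocks.0.attn1.processor.to_q_lora.down.weight"
        # (path illustrative) has its "unet." prefix stripped before being handed to `unet.load_attn_procs`.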
        keys = list(state_dict.keys())
        if all(key.startswith(cls.unet_name) or key.startswith(cls.text_encoder_name) for key in keys):
            # Load the layers corresponding to UNet.
            unet_keys = [k for k in keys if k.startswith(cls.unet_name)]
            logger.info(f"Loading {cls.unet_name}.")
            unet_lora_state_dict = {
                k.replace(f"{cls.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys
            }
            unet.load_attn_procs(unet_lora_state_dict, network_alpha=network_alpha)

        # Otherwise, we're dealing with the old format. This means the `state_dict` should only
        # contain the module names of the `unet` as its keys WITHOUT any prefix.
        elif not all(
            key.startswith(cls.unet_name) or key.startswith(cls.text_encoder_name) for key in state_dict.keys()
        ):
            unet.load_attn_procs(state_dict, network_alpha=network_alpha)
            warn_message = "You have saved the LoRA weights using the old format. To convert the old LoRA weights to the new format, you can first load them in a dictionary and then create a new dictionary like the following: `new_state_dict = {f'unet.{module_name}': params for module_name, params in old_state_dict.items()}`."
            warnings.warn(warn_message)

    @classmethod
    def load_lora_into_text_encoder(cls, state_dict, network_alpha, text_encoder, prefix=None, lora_scale=1.0):
        """
        This will load the LoRA layers specified in `state_dict` into `text_encoder`

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The key should be prefixed with an
                additional `text_encoder` to distinguish between unet lora layers.
            network_alpha (`float`):
                See `LoRALinearLayer` for more details.
            text_encoder (`CLIPTextModel`):
                The text encoder model to load the LoRA layers into.
            prefix (`str`):
                Expected prefix of the `text_encoder` in the `state_dict`.
            lora_scale (`float`):
                How much to scale the output of the lora linear layer before it is added to the output of the regular
                linear layer.
        """

        # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918),
        # then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as
        # their prefixes.
        keys = list(state_dict.keys())
        prefix = cls.text_encoder_name if prefix is None else prefix

        if any(cls.text_encoder_name in key for key in keys):
Will Berman's avatar
Will Berman committed
1047
            # Load the layers corresponding to text encoder and make necessary adjustments.
            text_encoder_keys = [k for k in keys if k.startswith(prefix)]
            text_encoder_lora_state_dict = {
                k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in text_encoder_keys
            }
            if len(text_encoder_lora_state_dict) > 0:
                logger.info(f"Loading {prefix}.")

                if any("to_out_lora" in k for k in text_encoder_lora_state_dict.keys()):
                    # Convert from the old naming convention to the new naming convention.
                    #
                    # Previously, the old LoRA layers were stored on the state dict at the
                    # same level as the attention block i.e.
                    # `text_model.encoder.layers.11.self_attn.to_out_lora.up.weight`.
                    #
                    # This is no actual module at that point, they were monkey patched on to the
                    # existing module. We want to be able to load them via their actual state dict.
                    # They're in `PatchedLoraProjection.lora_linear_layer` now.
                    for name, _ in text_encoder_attn_modules(text_encoder):
                        text_encoder_lora_state_dict[
                            f"{name}.q_proj.lora_linear_layer.up.weight"
                        ] = text_encoder_lora_state_dict.pop(f"{name}.to_q_lora.up.weight")
                        text_encoder_lora_state_dict[
                            f"{name}.k_proj.lora_linear_layer.up.weight"
                        ] = text_encoder_lora_state_dict.pop(f"{name}.to_k_lora.up.weight")
                        text_encoder_lora_state_dict[
                            f"{name}.v_proj.lora_linear_layer.up.weight"
                        ] = text_encoder_lora_state_dict.pop(f"{name}.to_v_lora.up.weight")
                        text_encoder_lora_state_dict[
                            f"{name}.out_proj.lora_linear_layer.up.weight"
                        ] = text_encoder_lora_state_dict.pop(f"{name}.to_out_lora.up.weight")

                        text_encoder_lora_state_dict[
                            f"{name}.q_proj.lora_linear_layer.down.weight"
                        ] = text_encoder_lora_state_dict.pop(f"{name}.to_q_lora.down.weight")
                        text_encoder_lora_state_dict[
                            f"{name}.k_proj.lora_linear_layer.down.weight"
                        ] = text_encoder_lora_state_dict.pop(f"{name}.to_k_lora.down.weight")
                        text_encoder_lora_state_dict[
                            f"{name}.v_proj.lora_linear_layer.down.weight"
                        ] = text_encoder_lora_state_dict.pop(f"{name}.to_v_lora.down.weight")
                        text_encoder_lora_state_dict[
                            f"{name}.out_proj.lora_linear_layer.down.weight"
                        ] = text_encoder_lora_state_dict.pop(f"{name}.to_out_lora.down.weight")

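                # LoRA's `up` matrix has shape (out_features, rank), so the rank can
                # be read off the second dimension of any `up.weight` tensor.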
                rank = text_encoder_lora_state_dict[
                    "text_model.encoder.layers.0.self_attn.out_proj.lora_linear_layer.up.weight"
                ].shape[1]

                cls._modify_text_encoder(text_encoder, lora_scale, network_alpha, rank=rank)

                # set correct dtype & device
                text_encoder_lora_state_dict = {
                    k: v.to(device=text_encoder.device, dtype=text_encoder.dtype)
                    for k, v in text_encoder_lora_state_dict.items()
                }

                load_state_dict_results = text_encoder.load_state_dict(text_encoder_lora_state_dict, strict=False)
                if len(load_state_dict_results.unexpected_keys) != 0:
                    raise ValueError(
                        f"failed to load text encoder state dict, unexpected keys: {load_state_dict_results.unexpected_keys}"
                    )

    @property
    def lora_scale(self) -> float:
        # property function that returns the lora scale which can be set at run time by the pipeline.
        # if _lora_scale has not been set, return 1
        return self._lora_scale if hasattr(self, "_lora_scale") else 1.0

    def _remove_text_encoder_monkey_patch(self):
        self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)

    @classmethod
    def _remove_text_encoder_monkey_patch_classmethod(cls, text_encoder):
        for _, attn_module in text_encoder_attn_modules(text_encoder):
            if isinstance(attn_module.q_proj, PatchedLoraProjection):
                attn_module.q_proj = attn_module.q_proj.regular_linear_layer
                attn_module.k_proj = attn_module.k_proj.regular_linear_layer
                attn_module.v_proj = attn_module.v_proj.regular_linear_layer
                attn_module.out_proj = attn_module.out_proj.regular_linear_layer

    @classmethod
    def _modify_text_encoder(cls, text_encoder, lora_scale=1, network_alpha=None, rank=4, dtype=None):
        r"""
        Monkey-patches the forward passes of attention modules of the text encoder.
        """

        # First, remove any monkey-patch that might have been applied before
        cls._remove_text_encoder_monkey_patch_classmethod(text_encoder)

        lora_parameters = []

        for _, attn_module in text_encoder_attn_modules(text_encoder):
            attn_module.q_proj = PatchedLoraProjection(
                attn_module.q_proj, lora_scale, network_alpha, rank=rank, dtype=dtype
            )
            lora_parameters.extend(attn_module.q_proj.lora_linear_layer.parameters())

            attn_module.k_proj = PatchedLoraProjection(
                attn_module.k_proj, lora_scale, network_alpha, rank=rank, dtype=dtype
            )
            lora_parameters.extend(attn_module.k_proj.lora_linear_layer.parameters())

            attn_module.v_proj = PatchedLoraProjection(
                attn_module.v_proj, lora_scale, network_alpha, rank=rank, dtype=dtype
            )
            lora_parameters.extend(attn_module.v_proj.lora_linear_layer.parameters())

            attn_module.out_proj = PatchedLoraProjection(
                attn_module.out_proj, lora_scale, network_alpha, rank=rank, dtype=dtype
            )
            lora_parameters.extend(attn_module.out_proj.lora_linear_layer.parameters())

        return lora_parameters

    @classmethod
    def save_lora_weights(
        cls,
        save_directory: Union[str, os.PathLike],
        unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
        text_encoder_lora_layers: Dict[str, torch.nn.Module] = None,
        is_main_process: bool = True,
        weight_name: str = None,
        save_function: Callable = None,
        safe_serialization: bool = False,
    ):
        r"""
        Save the LoRA parameters corresponding to the UNet and text encoder.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save LoRA parameters to. Will be created if it doesn't exist.
            unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `unet`.
            text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training when you
                need to call this function on all processes. In this case, set `is_main_process=True` only on the main
                process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
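            weight_name (`str`, *optional*):
                Name of the file to save the weights to. If `None`, defaults to `pytorch_lora_weights.safetensors`
                when `safe_serialization` is `True` and to `pytorch_lora_weights.bin` otherwise.
            safe_serialization (`bool`, *optional*, defaults to `False`):
                Whether to save the weights in the `safetensors` format instead of with `torch.save`.

        Examples:

        A hedged sketch (the layer dicts are assumed to come from your own LoRA training loop; any mapping from
        module names to tensors works):

        ```py
        >>> from diffusers import StableDiffusionPipeline

        >>> StableDiffusionPipeline.save_lora_weights(
        ...     save_directory="./lora",
        ...     unet_lora_layers=unet_lora_layers,
        ...     text_encoder_lora_layers=text_encoder_lora_layers,
        ...     safe_serialization=True,
        ... )
        ```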
        """
        # Create a flat dictionary.
        state_dict = {}

        # Populate the dictionary.
        if unet_lora_layers is not None:
            weights = (
                unet_lora_layers.state_dict() if isinstance(unet_lora_layers, torch.nn.Module) else unet_lora_layers
            )

            unet_lora_state_dict = {f"{cls.unet_name}.{module_name}": param for module_name, param in weights.items()}
            state_dict.update(unet_lora_state_dict)

        if text_encoder_lora_layers is not None:
            weights = (
                text_encoder_lora_layers.state_dict()
                if isinstance(text_encoder_lora_layers, torch.nn.Module)
                else text_encoder_lora_layers
            )

            text_encoder_lora_state_dict = {
                f"{self.text_encoder_name}.{module_name}": param for module_name, param in weights.items()
            }
            state_dict.update(text_encoder_lora_state_dict)

        # Save the model
        cls.write_lora_layers(
            state_dict=state_dict,
            save_directory=save_directory,
            is_main_process=is_main_process,
            weight_name=weight_name,
            save_function=save_function,
            safe_serialization=safe_serialization,
        )

    @staticmethod
    def write_lora_layers(
        state_dict: Dict[str, torch.Tensor],
        save_directory: str,
        is_main_process: bool,
        weight_name: str,
        save_function: Callable,
        safe_serialization: bool,
    ):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        if save_function is None:
            if safe_serialization:

                def save_function(weights, filename):
                    return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"})

            else:
                save_function = torch.save

        os.makedirs(save_directory, exist_ok=True)

        if weight_name is None:
            if safe_serialization:
                weight_name = LORA_WEIGHT_NAME_SAFE
            else:
                weight_name = LORA_WEIGHT_NAME

        save_function(state_dict, os.path.join(save_directory, weight_name))
        logger.info(f"Model weights saved in {os.path.join(save_directory, weight_name)}")

    @classmethod
    def _convert_kohya_lora_to_diffusers(cls, state_dict):
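        # A Kohya-style checkpoint stores flat keys such as
        # `lora_unet_<module_path>.lora_down.weight`, each paired with a
        # `.lora_up.weight` tensor and an optional `.alpha` scalar; this helper
        # rewrites them into the `unet.` / `text_encoder.`-prefixed layout used
        # by this mixin.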
        unet_state_dict = {}
        te_state_dict = {}
        network_alpha = None

        for key, value in state_dict.items():
            if "lora_down" in key:
                lora_name = key.split(".")[0]
                lora_name_up = lora_name + ".lora_up.weight"
                lora_name_alpha = lora_name + ".alpha"
                if lora_name_alpha in state_dict:
                    alpha = state_dict[lora_name_alpha].item()
                    if network_alpha is None:
                        network_alpha = alpha
                    elif network_alpha != alpha:
                        raise ValueError("Network alpha is not consistent")

                if lora_name.startswith("lora_unet_"):
                    diffusers_name = key.replace("lora_unet_", "").replace("_", ".")
                    diffusers_name = diffusers_name.replace("down.blocks", "down_blocks")
                    diffusers_name = diffusers_name.replace("mid.block", "mid_block")
                    diffusers_name = diffusers_name.replace("up.blocks", "up_blocks")
                    diffusers_name = diffusers_name.replace("transformer.blocks", "transformer_blocks")
                    diffusers_name = diffusers_name.replace("to.q.lora", "to_q_lora")
                    diffusers_name = diffusers_name.replace("to.k.lora", "to_k_lora")
                    diffusers_name = diffusers_name.replace("to.v.lora", "to_v_lora")
                    diffusers_name = diffusers_name.replace("to.out.0.lora", "to_out_lora")
                    if "transformer_blocks" in diffusers_name:
                        if "attn1" in diffusers_name or "attn2" in diffusers_name:
                            diffusers_name = diffusers_name.replace("attn1", "attn1.processor")
                            diffusers_name = diffusers_name.replace("attn2", "attn2.processor")
                            unet_state_dict[diffusers_name] = value
                            unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict[lora_name_up]
                elif lora_name.startswith("lora_te_"):
                    diffusers_name = key.replace("lora_te_", "").replace("_", ".")
                    diffusers_name = diffusers_name.replace("text.model", "text_model")
                    diffusers_name = diffusers_name.replace("self.attn", "self_attn")
                    diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
                    diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
                    diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
                    diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
                    if "self_attn" in diffusers_name:
                        te_state_dict[diffusers_name] = value
                        te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict[lora_name_up]

        unet_state_dict = {f"{UNET_NAME}.{module_name}": params for module_name, params in unet_state_dict.items()}
        te_state_dict = {f"{TEXT_ENCODER_NAME}.{module_name}": params for module_name, params in te_state_dict.items()}
        new_state_dict = {**unet_state_dict, **te_state_dict}
        return new_state_dict, network_alpha

    def unload_lora_weights(self):
        """
        Unloads the LoRA parameters.

        Examples:

        ```python
        >>> # Assuming `pipeline` is already loaded with the LoRA parameters.
        >>> pipeline.unload_lora_weights()
        >>> ...
        ```
        """
        from .models.attention_processor import (
            LORA_ATTENTION_PROCESSORS,
            AttnProcessor,
            AttnProcessor2_0,
            LoRAAttnAddedKVProcessor,
            LoRAAttnProcessor,
            LoRAAttnProcessor2_0,
            LoRAXFormersAttnProcessor,
            XFormersAttnProcessor,
        )

        unet_attention_classes = {type(processor) for _, processor in self.unet.attn_processors.items()}

        if unet_attention_classes.issubset(LORA_ATTENTION_PROCESSORS):
            # Handle attention processors that are a mix of regular attention and AddedKV
            # attention.
            if len(unet_attention_classes) > 1 or LoRAAttnAddedKVProcessor in unet_attention_classes:
                self.unet.set_default_attn_processor()
            else:
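                # A single (non-AddedKV) LoRA attention class is in use; swap in
                # the matching regular attention processor.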
                regular_attention_classes = {
                    LoRAAttnProcessor: AttnProcessor,
                    LoRAAttnProcessor2_0: AttnProcessor2_0,
                    LoRAXFormersAttnProcessor: XFormersAttnProcessor,
                }
                [attention_proc_class] = unet_attention_classes
                self.unet.set_attn_processor(regular_attention_classes[attention_proc_class]())

        # Safe to call the following regardless of LoRA.
        self._remove_text_encoder_monkey_patch()


class FromSingleFileMixin:
    """
    Load model weights saved in the `.ckpt` format into a [`DiffusionPipeline`].
    """

    @classmethod
    def from_ckpt(cls, *args, **kwargs):
        deprecation_message = "The function `from_ckpt` is deprecated in favor of `from_single_file` and will be removed in diffusers v.0.21. Please make sure to use `StableDiffusionPipeline.from_single_file(...)` instead."
        deprecate("from_ckpt", "0.21.0", deprecation_message, standard_warn=False)
        return cls.from_single_file(*args, **kwargs)

    @classmethod
    def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
        r"""
        Instantiate a [`DiffusionPipeline`] from pretrained pipeline weights saved in the `.ckpt` or `.safetensors`
        format. The pipeline is set in evaluation mode (`model.eval()`) by default.

        Parameters:
            pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:
                    - A link to the `.ckpt` file (for example
                      `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
                    - A path to a *file* containing all pipeline weights.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
                dtype is automatically derived from the model's weights.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the safetensors weights are downloaded if they're available **and** if the
                safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
                weights. If set to `False`, safetensors weights are not loaded.
            extract_ema (`bool`, *optional*, defaults to `False`):
                Whether to extract the EMA weights or not. Pass `True` to extract the EMA weights which usually yield
                higher quality images for inference. Non-EMA weights are usually better for continuing finetuning.
            upcast_attention (`bool`, *optional*, defaults to `None`):
                Whether the attention computation should always be upcasted.
            image_size (`int`, *optional*, defaults to 512):
                The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
                Diffusion v2 base model. Use 768 for Stable Diffusion v2.
            prediction_type (`str`, *optional*):
                The prediction type the model was trained on. Use `'epsilon'` for all Stable Diffusion v1 models and
                the Stable Diffusion v2 base model. Use `'v_prediction'` for Stable Diffusion v2.
            num_in_channels (`int`, *optional*, defaults to `None`):
                The number of input channels. If `None`, it is automatically inferred.
            scheduler_type (`str`, *optional*, defaults to `"pndm"`):
                Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm",
                "ddim"]`.
            load_safety_checker (`bool`, *optional*, defaults to `True`):
                Whether to load the safety checker or not.
            text_encoder ([`~transformers.CLIPTextModel`], *optional*, defaults to `None`):
                An instance of `CLIPTextModel` to use, specifically the
                [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. If this
                parameter is `None`, the function loads a new instance of `CLIPTextModel` by itself if needed.
            vae (`AutoencoderKL`, *optional*, defaults to `None`):
                Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. If
                this parameter is `None`, the function loads a new instance of `AutoencoderKL` by itself, if needed.
            tokenizer ([`~transformers.CLIPTokenizer`], *optional*, defaults to `None`):
                An instance of `CLIPTokenizer` to use. If this parameter is `None`, the function loads a new instance
                of `CLIPTokenizer` by itself if needed.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load and saveable variables (for example the pipeline components of the
                specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
                method. See example below for more information.

        Examples:

        ```py
        >>> from diffusers import StableDiffusionPipeline

        >>> # Download pipeline from huggingface.co and cache.
        >>> pipeline = StableDiffusionPipeline.from_single_file(
        ...     "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors"
        ... )

        >>> # Load pipeline from a local file
        >>> # (the checkpoint was previously downloaded to ./v1-5-pruned-emaonly.ckpt)
        >>> pipeline = StableDiffusionPipeline.from_single_file("./v1-5-pruned-emaonly.ckpt")

        >>> # Enable float16 and move to GPU
        >>> pipeline = StableDiffusionPipeline.from_single_file(
        ...     "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
        ...     torch_dtype=torch.float16,
        ... )
        >>> pipeline.to("cuda")
        ```
        """
        # import here to avoid circular dependency
        from .pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt

        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        extract_ema = kwargs.pop("extract_ema", False)
        image_size = kwargs.pop("image_size", None)
        scheduler_type = kwargs.pop("scheduler_type", "pndm")
        num_in_channels = kwargs.pop("num_in_channels", None)
        upcast_attention = kwargs.pop("upcast_attention", None)
        load_safety_checker = kwargs.pop("load_safety_checker", True)
        prediction_type = kwargs.pop("prediction_type", None)
        text_encoder = kwargs.pop("text_encoder", None)
        vae = kwargs.pop("vae", None)
        controlnet = kwargs.pop("controlnet", None)
        tokenizer = kwargs.pop("tokenizer", None)

        torch_dtype = kwargs.pop("torch_dtype", None)

        use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)

        pipeline_name = cls.__name__
        file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
        from_safetensors = file_extension == "safetensors"

        if from_safetensors and use_safetensors is False:
            raise ValueError(
                "The checkpoint is in the `.safetensors` format, but `use_safetensors` was resolved to `False`. Make"
                " sure `safetensors` is installed (`pip install safetensors`) and do not pass `use_safetensors=False`."
            )

        # TODO: For now we only support stable diffusion
        stable_unclip = None
        model_type = None

        if pipeline_name in [
            "StableDiffusionControlNetPipeline",
            "StableDiffusionControlNetImg2ImgPipeline",
            "StableDiffusionControlNetInpaintPipeline",
        ]:
            from .models.controlnet import ControlNetModel
            from .pipelines.controlnet.multicontrolnet import MultiControlNetModel

            # Model type will be inferred from the checkpoint.
            if not isinstance(controlnet, (ControlNetModel, MultiControlNetModel)):
                raise ValueError("ControlNet needs to be passed if loading from ControlNet pipeline.")
        elif "StableDiffusion" in pipeline_name:
            # Model type will be inferred from the checkpoint.
            pass
        elif pipeline_name == "StableUnCLIPPipeline":
            model_type = "FrozenOpenCLIPEmbedder"
            stable_unclip = "txt2img"
        elif pipeline_name == "StableUnCLIPImg2ImgPipeline":
            model_type = "FrozenOpenCLIPEmbedder"
            stable_unclip = "img2img"
        elif pipeline_name == "PaintByExamplePipeline":
            model_type = "PaintByExample"
        elif pipeline_name == "LDMTextToImagePipeline":
            model_type = "LDMTextToImage"
        else:
            raise ValueError(f"Unhandled pipeline class: {pipeline_name}")

        # remove huggingface url
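        # e.g. "https://huggingface.co/<repo_id>/blob/main/model.ckpt" -> "<repo_id>/blob/main/model.ckpt"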
        for prefix in ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]:
            if pretrained_model_link_or_path.startswith(prefix):
                pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]

        # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
        ckpt_path = Path(pretrained_model_link_or_path)
        if not ckpt_path.is_file():
            # get repo_id and (potentially nested) file path of ckpt in repo
            repo_id = "/".join(ckpt_path.parts[:2])
            file_path = "/".join(ckpt_path.parts[2:])
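            # e.g. "org/model/blob/main/model.ckpt" -> repo_id="org/model", file_path="blob/main/model.ckpt"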

            if file_path.startswith("blob/"):
                file_path = file_path[len("blob/") :]

            if file_path.startswith("main/"):
                file_path = file_path[len("main/") :]

            pretrained_model_link_or_path = hf_hub_download(
                repo_id,
                filename=file_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                force_download=force_download,
            )

        pipe = download_from_original_stable_diffusion_ckpt(
            pretrained_model_link_or_path,
            pipeline_class=cls,
            model_type=model_type,
            stable_unclip=stable_unclip,
            controlnet=controlnet,
            from_safetensors=from_safetensors,
            extract_ema=extract_ema,
            image_size=image_size,
            scheduler_type=scheduler_type,
            num_in_channels=num_in_channels,
            upcast_attention=upcast_attention,
            load_safety_checker=load_safety_checker,
            prediction_type=prediction_type,
            text_encoder=text_encoder,
            vae=vae,
            tokenizer=tokenizer,
        )

        if torch_dtype is not None:
            pipe.to(torch_dtype=torch_dtype)

        return pipe


class FromOriginalVAEMixin:
    @classmethod
    def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
        r"""
        Instantiate an [`AutoencoderKL`] from pretrained VAE weights saved in the original `.ckpt` or
        `.safetensors` format. The model is set in evaluation mode (`model.eval()`) by default.

        Parameters:
            pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:
                    - A link to the `.ckpt` file (for example
                      `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
                    - A path to a *file* containing all pipeline weights.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
                dtype is automatically derived from the model's weights.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            image_size (`int`, *optional*, defaults to 512):
                The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
                Diffusion v2 base model. Use 768 for Stable Diffusion v2.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the safetensors weights are downloaded if they're available **and** if the
                safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
                weights. If set to `False`, safetensors weights are not loaded.
            upcast_attention (`bool`, *optional*, defaults to `None`):
                Whether the attention computation should always be upcasted.
            scaling_factor (`float`, *optional*, defaults to 0.18215):
                The component-wise standard deviation of the trained latent space computed using the first batch of the
                training set. This is used to scale the latent space to have unit variance when training the diffusion
                model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
                diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z
                = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution
                Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load and saveable variables (for example the pipeline components of the
                specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
                method. See example below for more information.

        <Tip warning={true}>

            Make sure to pass both `image_size` and `scaling_factor` to `from_single_file()` if you want to load
            a VAE that accompanies a Stable Diffusion v2 (or higher) or SDXL model.

        </Tip>

        Examples:

        ```py
        from diffusers import AutoencoderKL

        url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors"  # can also be a local file
        model = AutoencoderKL.from_single_file(url)
        ```
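
        A hedged sketch for a VAE that accompanies an SDXL-class model (the values below are illustrative
        assumptions, not verified defaults):

        ```py
        model = AutoencoderKL.from_single_file(url, image_size=1024, scaling_factor=0.13025)
        ```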
        """
        if not is_omegaconf_available():
            raise ValueError(BACKENDS_MAPPING["omegaconf"][1])

        from omegaconf import OmegaConf

        from .models import AutoencoderKL

        # import here to avoid circular dependency
        from .pipelines.stable_diffusion.convert_from_ckpt import (
            convert_ldm_vae_checkpoint,
            create_vae_diffusers_config,
        )

        config_file = kwargs.pop("config_file", None)
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        image_size = kwargs.pop("image_size", None)
        scaling_factor = kwargs.pop("scaling_factor", None)
        kwargs.pop("upcast_attention", None)

        torch_dtype = kwargs.pop("torch_dtype", None)

        use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)

        file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
        from_safetensors = file_extension == "safetensors"

        if from_safetensors and use_safetensors is False:
            raise ValueError(
                "The checkpoint is in the `.safetensors` format, but `use_safetensors` was resolved to `False`. Make"
                " sure `safetensors` is installed (`pip install safetensors`) and do not pass `use_safetensors=False`."
            )

        # remove huggingface url
        for prefix in ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]:
            if pretrained_model_link_or_path.startswith(prefix):
                pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]

        # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
        ckpt_path = Path(pretrained_model_link_or_path)
        if not ckpt_path.is_file():
            # get repo_id and (potentially nested) file path of ckpt in repo
            repo_id = "/".join(ckpt_path.parts[:2])
            file_path = "/".join(ckpt_path.parts[2:])

            if file_path.startswith("blob/"):
                file_path = file_path[len("blob/") :]

            if file_path.startswith("main/"):
                file_path = file_path[len("main/") :]

            pretrained_model_link_or_path = hf_hub_download(
                repo_id,
                filename=file_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                force_download=force_download,
            )

        if from_safetensors:
            from safetensors import safe_open

            checkpoint = {}
            with safe_open(pretrained_model_link_or_path, framework="pt", device="cpu") as f:
                for key in f.keys():
                    checkpoint[key] = f.get_tensor(key)
        else:
            checkpoint = torch.load(pretrained_model_link_or_path, map_location="cpu")

        if "state_dict" in checkpoint:
            checkpoint = checkpoint["state_dict"]

        if config_file is None:
            config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
            config_file = BytesIO(requests.get(config_url).content)

        original_config = OmegaConf.load(config_file)

        # default to sd-v1-5
        image_size = image_size or 512

        vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
        converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)

        if scaling_factor is None:
            if (
                "model" in original_config
                and "params" in original_config.model
                and "scale_factor" in original_config.model.params
            ):
                scaling_factor = original_config.model.params.scale_factor
            else:
                scaling_factor = 0.18215  # default SD scaling factor

        vae_config["scaling_factor"] = scaling_factor

        ctx = init_empty_weights if is_accelerate_available() else nullcontext
        with ctx():
            vae = AutoencoderKL(**vae_config)

        if is_accelerate_available():
            for param_name, param in converted_vae_checkpoint.items():
                set_module_tensor_to_device(vae, param_name, "cpu", value=param)
        else:
            vae.load_state_dict(converted_vae_checkpoint)

        if torch_dtype is not None:
            vae.to(dtype=torch_dtype)

        return vae


class FromOriginalControlnetMixin:
    @classmethod
    def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
        r"""
        Instantiate a [`ControlNetModel`] from pretrained controlnet weights saved in the original `.ckpt` or
        `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default.

        Parameters:
            pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:
                    - A link to the `.ckpt` file (for example
                      `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
                    - A path to a *file* containing all pipeline weights.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
                dtype is automatically derived from the model's weights.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the safetensors weights are downloaded if they're available **and** if the
                safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
                weights. If set to `False`, safetensors weights are not loaded.
            image_size (`int`, *optional*, defaults to 512):
                The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
                Diffusion v2 base model. Use 768 for Stable Diffusion v2.
            upcast_attention (`bool`, *optional*, defaults to `None`):
                Whether the attention computation should always be upcasted.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load and saveable variables (for example the pipeline components of the
                specific pipeline class). The overwritten components are directly passed to the pipelines `__init__`
                method. See example below for more information.

        Examples:

        ```py
        from diffusers import StableDiffusionControlNetPipeline, ControlNetModel

        url = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"  # can also be a local path
        controlnet = ControlNetModel.from_single_file(url)

        url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.safetensors"  # can also be a local path
        pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=controlnet)
        ```
        """
        # import here to avoid circular dependency
        from .pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt

        config_file = kwargs.pop("config_file", None)
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
        use_auth_token = kwargs.pop("use_auth_token", None)
        num_in_channels = kwargs.pop("num_in_channels", None)
        use_linear_projection = kwargs.pop("use_linear_projection", None)
        revision = kwargs.pop("revision", None)
        extract_ema = kwargs.pop("extract_ema", False)
        image_size = kwargs.pop("image_size", None)
        upcast_attention = kwargs.pop("upcast_attention", None)

        torch_dtype = kwargs.pop("torch_dtype", None)

        use_safetensors = kwargs.pop("use_safetensors", None if is_safetensors_available() else False)

        file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
        from_safetensors = file_extension == "safetensors"

        if from_safetensors and use_safetensors is False:
            raise ValueError(
                "The checkpoint is in the `.safetensors` format, but `use_safetensors` was resolved to `False`. Make"
                " sure `safetensors` is installed (`pip install safetensors`) and do not pass `use_safetensors=False`."
            )

        # remove huggingface url
        for prefix in ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]:
            if pretrained_model_link_or_path.startswith(prefix):
                pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]

        # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
        ckpt_path = Path(pretrained_model_link_or_path)
        if not ckpt_path.is_file():
            # get repo_id and (potentially nested) file path of ckpt in repo
            repo_id = "/".join(ckpt_path.parts[:2])
            file_path = "/".join(ckpt_path.parts[2:])

            if file_path.startswith("blob/"):
                file_path = file_path[len("blob/") :]

            if file_path.startswith("main/"):
                file_path = file_path[len("main/") :]

            pretrained_model_link_or_path = hf_hub_download(
                repo_id,
                filename=file_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                force_download=force_download,
            )

        if config_file is None:
            config_url = "https://raw.githubusercontent.com/lllyasviel/ControlNet/main/models/cldm_v15.yaml"
            config_file = BytesIO(requests.get(config_url).content)

        image_size = image_size or 512

        controlnet = download_controlnet_from_original_ckpt(
            pretrained_model_link_or_path,
            original_config_file=config_file,
            image_size=image_size,
            extract_ema=extract_ema,
            num_in_channels=num_in_channels,
            upcast_attention=upcast_attention,
            from_safetensors=from_safetensors,
            use_linear_projection=use_linear_projection,
        )

        if torch_dtype is not None:
            controlnet.to(dtype=torch_dtype)

        return controlnet