"vscode:/vscode.git/clone" did not exist on "ea8b5d7933a659b20127a852645ee1cbc5494f4e"
loaders.py 145 KB
Newer Older
Patrick von Platen's avatar
Patrick von Platen committed
1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import importlib
import os
import re
from collections import defaultdict
from contextlib import nullcontext
from io import BytesIO
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union

import requests
import safetensors
import torch
from huggingface_hub import hf_hub_download, model_info
from packaging import version
from torch import nn

from . import __version__
from .models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_model_dict_into_meta
from .utils import (
    DIFFUSERS_CACHE,
    HF_HUB_OFFLINE,
    _get_model_file,
    convert_state_dict_to_diffusers,
    convert_state_dict_to_peft,
    deprecate,
    get_adapter_name,
    get_peft_kwargs,
    is_accelerate_available,
    is_omegaconf_available,
    is_peft_available,
    is_transformers_available,
    logging,
    recurse_remove_peft_layers,
    scale_lora_layers,
    set_adapter_layers,
    set_weights_and_activate_adapters,
)
from .utils.import_utils import BACKENDS_MAPPING


if is_transformers_available():
    from transformers import CLIPTextModel, CLIPTextModelWithProjection, PreTrainedModel

if is_accelerate_available():
    from accelerate import init_empty_weights
    from accelerate.hooks import AlignDevicesHook, CpuOffload, remove_hook_from_module


logger = logging.get_logger(__name__)

TEXT_ENCODER_NAME = "text_encoder"
UNET_NAME = "unet"

LORA_WEIGHT_NAME = "pytorch_lora_weights.bin"
LORA_WEIGHT_NAME_SAFE = "pytorch_lora_weights.safetensors"

TEXT_INVERSION_NAME = "learned_embeds.bin"
TEXT_INVERSION_NAME_SAFE = "learned_embeds.safetensors"

CUSTOM_DIFFUSION_WEIGHT_NAME = "pytorch_custom_diffusion_weights.bin"
CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE = "pytorch_custom_diffusion_weights.safetensors"


# Below should be `True` if the current versions of `peft` and `transformers` are compatible with
# the PEFT backend. The PEFT backend is used automatically if the correct versions of both
# libraries are available.
# For `peft` the version has to be greater than 0.6.0 and for `transformers` it has to be greater than 4.33.1.
_required_peft_version = is_peft_available() and version.parse(
    version.parse(importlib.metadata.version("peft")).base_version
) > version.parse("0.5")
_required_transformers_version = version.parse(
    version.parse(importlib.metadata.version("transformers")).base_version
) > version.parse("4.33")

USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
LORA_DEPRECATION_MESSAGE = "You are using an old version of the LoRA backend. This will be deprecated in the next releases in favor of PEFT. Make sure to install the latest PEFT and transformers packages in the future."


class PatchedLoraProjection(nn.Module):
    def __init__(self, regular_linear_layer, lora_scale=1, network_alpha=None, rank=4, dtype=None):
        super().__init__()
        from .models.lora import LoRALinearLayer

        self.regular_linear_layer = regular_linear_layer

        device = self.regular_linear_layer.weight.device

        if dtype is None:
            dtype = self.regular_linear_layer.weight.dtype

        self.lora_linear_layer = LoRALinearLayer(
            self.regular_linear_layer.in_features,
            self.regular_linear_layer.out_features,
            network_alpha=network_alpha,
            device=device,
            dtype=dtype,
            rank=rank,
        )

        self.lora_scale = lora_scale

    # overwrite PyTorch's `state_dict` to be sure that only the 'regular_linear_layer' weights are saved
    # when saving the whole text encoder model and when LoRA is unloaded or fused
    def state_dict(self, *args, destination=None, prefix="", keep_vars=False):
        if self.lora_linear_layer is None:
            return self.regular_linear_layer.state_dict(
                *args, destination=destination, prefix=prefix, keep_vars=keep_vars
            )

        return super().state_dict(*args, destination=destination, prefix=prefix, keep_vars=keep_vars)

    def _fuse_lora(self, lora_scale=1.0, safe_fusing=False):
        if self.lora_linear_layer is None:
            return

        dtype, device = self.regular_linear_layer.weight.data.dtype, self.regular_linear_layer.weight.data.device

        w_orig = self.regular_linear_layer.weight.data.float()
        w_up = self.lora_linear_layer.up.weight.data.float()
        w_down = self.lora_linear_layer.down.weight.data.float()

        if self.lora_linear_layer.network_alpha is not None:
            w_up = w_up * self.lora_linear_layer.network_alpha / self.lora_linear_layer.rank

        fused_weight = w_orig + (lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])

        if safe_fusing and torch.isnan(fused_weight).any().item():
            raise ValueError(
                "This LoRA weight seems to be broken. "
                f"Encountered NaN values when trying to fuse LoRA weights for {self}."
                "LoRA weights will not be fused."
            )

        self.regular_linear_layer.weight.data = fused_weight.to(device=device, dtype=dtype)

        # we can drop the lora layer now
        self.lora_linear_layer = None

        # offload the up and down matrices to CPU to not blow the memory
        self.w_up = w_up.cpu()
        self.w_down = w_down.cpu()
        self.lora_scale = lora_scale

    def _unfuse_lora(self):
        if getattr(self, "w_up", None) is None or getattr(self, "w_down", None) is None:
            return

        fused_weight = self.regular_linear_layer.weight.data
        dtype, device = fused_weight.dtype, fused_weight.device

        w_up = self.w_up.to(device=device).float()
        w_down = self.w_down.to(device).float()

        unfused_weight = fused_weight.float() - (self.lora_scale * torch.bmm(w_up[None, :], w_down[None, :])[0])
        self.regular_linear_layer.weight.data = unfused_weight.to(device=device, dtype=dtype)

        self.w_up = None
        self.w_down = None

    def forward(self, input):
        if self.lora_scale is None:
            self.lora_scale = 1.0
        if self.lora_linear_layer is None:
            return self.regular_linear_layer(input)
        return self.regular_linear_layer(input) + (self.lora_scale * self.lora_linear_layer(input))


def text_encoder_attn_modules(text_encoder):
    attn_modules = []

    if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
        for i, layer in enumerate(text_encoder.text_model.encoder.layers):
            name = f"text_model.encoder.layers.{i}.self_attn"
            mod = layer.self_attn
            attn_modules.append((name, mod))
    else:
        raise ValueError(f"do not know how to get attention modules for: {text_encoder.__class__.__name__}")

    return attn_modules


def text_encoder_mlp_modules(text_encoder):
    mlp_modules = []

    if isinstance(text_encoder, (CLIPTextModel, CLIPTextModelWithProjection)):
        for i, layer in enumerate(text_encoder.text_model.encoder.layers):
            mlp_mod = layer.mlp
            name = f"text_model.encoder.layers.{i}.mlp"
            mlp_modules.append((name, mlp_mod))
    else:
        raise ValueError(f"do not know how to get mlp modules for: {text_encoder.__class__.__name__}")

    return mlp_modules


def text_encoder_lora_state_dict(text_encoder):
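    r"""
    Collect the LoRA layer weights of the text encoder attention projections (`q_proj`, `k_proj`, `v_proj`,
    `out_proj`) into a flat state dict keyed by module path.
    """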
    state_dict = {}

    for name, module in text_encoder_attn_modules(text_encoder):
        for k, v in module.q_proj.lora_linear_layer.state_dict().items():
            state_dict[f"{name}.q_proj.lora_linear_layer.{k}"] = v

        for k, v in module.k_proj.lora_linear_layer.state_dict().items():
            state_dict[f"{name}.k_proj.lora_linear_layer.{k}"] = v

        for k, v in module.v_proj.lora_linear_layer.state_dict().items():
            state_dict[f"{name}.v_proj.lora_linear_layer.{k}"] = v

        for k, v in module.out_proj.lora_linear_layer.state_dict().items():
            state_dict[f"{name}.out_proj.lora_linear_layer.{k}"] = v

    return state_dict


class AttnProcsLayers(torch.nn.Module):
    def __init__(self, state_dict: Dict[str, torch.Tensor]):
        super().__init__()
        self.layers = torch.nn.ModuleList(state_dict.values())
        self.mapping = dict(enumerate(state_dict.keys()))
        self.rev_mapping = {v: k for k, v in enumerate(state_dict.keys())}

        # .processor for unet, .self_attn for text encoder
        self.split_keys = [".processor", ".self_attn"]

        # we add a hook to state_dict() and load_state_dict() so that the
        # naming fits with `unet.attn_processors`
        def map_to(module, state_dict, *args, **kwargs):
            new_state_dict = {}
            for key, value in state_dict.items():
                num = int(key.split(".")[1])  # 0 is always "layers"
                new_key = key.replace(f"layers.{num}", module.mapping[num])
                new_state_dict[new_key] = value

            return new_state_dict

        def remap_key(key, state_dict):
            for k in self.split_keys:
                if k in key:
                    return key.split(k)[0] + k

            raise ValueError(
                f"There seems to be a problem with the state_dict: {set(state_dict.keys())}. {key} has to have one of {self.split_keys}."
            )

        def map_from(module, state_dict, *args, **kwargs):
            all_keys = list(state_dict.keys())
            for key in all_keys:
                replace_key = remap_key(key, state_dict)
                new_key = key.replace(replace_key, f"layers.{module.rev_mapping[replace_key]}")
                state_dict[new_key] = state_dict[key]
                del state_dict[key]

        self._register_state_dict_hook(map_to)
        self._register_load_state_dict_pre_hook(map_from, with_module=True)


class UNet2DConditionLoadersMixin:
    text_encoder_name = TEXT_ENCODER_NAME
    unet_name = UNET_NAME

    def load_attn_procs(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
        r"""
        Load pretrained attention processor layers into [`UNet2DConditionModel`]. Attention processor layers have to be
        defined in
        [`attention_processor.py`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py)
        and be a `torch.nn.Module` class.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the model id (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a directory (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
                tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
                Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            mirror (`str`, *optional*):
                Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not
                guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
                information.
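
        Example (a minimal sketch; the LoRA path below is a placeholder):

        ```py
        import torch
        from diffusers import StableDiffusionPipeline

        model_id = "runwayml/stable-diffusion-v1-5"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

        pipe.unet.load_attn_procs("path-to-lora-weights", weight_name="pytorch_lora_weights.safetensors")
        ```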

        """
        from .models.attention_processor import (
            CustomDiffusionAttnProcessor,
        )
        from .models.lora import LoRACompatibleConv, LoRACompatibleLinear, LoRAConv2dLayer, LoRALinearLayer

        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", None)
        weight_name = kwargs.pop("weight_name", None)
        use_safetensors = kwargs.pop("use_safetensors", None)
        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)
        # This value has the same meaning as the `--network_alpha` option in the kohya-ss trainer script.
        # See https://github.com/darkstorm2150/sd-scripts/blob/main/docs/train_network_README-en.md#execute-learning
        network_alphas = kwargs.pop("network_alphas", None)

        _pipeline = kwargs.pop("_pipeline", None)

        is_network_alphas_none = network_alphas is None

        allow_pickle = False
        if use_safetensors is None:
            use_safetensors = True
            allow_pickle = True

        user_agent = {
            "file_type": "attn_procs_weights",
            "framework": "pytorch",
        }

        if low_cpu_mem_usage and not is_accelerate_available():
            low_cpu_mem_usage = False
            logger.warning(
                "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
                " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
                " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
                " install accelerate\n```\n."
            )

        model_file = None
        if not isinstance(pretrained_model_name_or_path_or_dict, dict):
            # Let's first try to load .safetensors weights
            if (use_safetensors and weight_name is None) or (
                weight_name is not None and weight_name.endswith(".safetensors")
            ):
                try:
                    model_file = _get_model_file(
                        pretrained_model_name_or_path_or_dict,
                        weights_name=weight_name or LORA_WEIGHT_NAME_SAFE,
                        cache_dir=cache_dir,
                        force_download=force_download,
                        resume_download=resume_download,
                        proxies=proxies,
                        local_files_only=local_files_only,
                        use_auth_token=use_auth_token,
                        revision=revision,
                        subfolder=subfolder,
                        user_agent=user_agent,
                    )
                    state_dict = safetensors.torch.load_file(model_file, device="cpu")
                except IOError as e:
                    if not allow_pickle:
                        raise e
                    # try loading non-safetensors weights
                    pass
            if model_file is None:
                model_file = _get_model_file(
                    pretrained_model_name_or_path_or_dict,
                    weights_name=weight_name or LORA_WEIGHT_NAME,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    subfolder=subfolder,
                    user_agent=user_agent,
                )
                state_dict = torch.load(model_file, map_location="cpu")
        else:
            state_dict = pretrained_model_name_or_path_or_dict

        # fill attn processors
        lora_layers_list = []

        is_lora = all(("lora" in k or k.endswith(".alpha")) for k in state_dict.keys())
        is_custom_diffusion = any("custom_diffusion" in k for k in state_dict.keys())

        if is_lora:
            # correct keys
            state_dict, network_alphas = self.convert_state_dict_legacy_attn_format(state_dict, network_alphas)

            if network_alphas is not None:
                network_alphas_keys = list(network_alphas.keys())
                used_network_alphas_keys = set()

            lora_grouped_dict = defaultdict(dict)
            mapped_network_alphas = {}

            all_keys = list(state_dict.keys())
            for key in all_keys:
                value = state_dict.pop(key)
                attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:])
                lora_grouped_dict[attn_processor_key][sub_key] = value

                # Create another `mapped_network_alphas` dictionary so that we can properly map them.
                if network_alphas is not None:
                    for k in network_alphas_keys:
                        if k.replace(".alpha", "") in key:
                            mapped_network_alphas.update({attn_processor_key: network_alphas.get(k)})
                            used_network_alphas_keys.add(k)

            if not is_network_alphas_none:
                if len(set(network_alphas_keys) - used_network_alphas_keys) > 0:
                    raise ValueError(
                        f"All keys in `network_alphas` should have been consumed at this point but the following were not: \n\n {', '.join(set(network_alphas_keys) - used_network_alphas_keys)}"
                    )

            if len(state_dict) > 0:
                raise ValueError(
                    f"The `state_dict` has to be empty at this point but has the following keys \n\n {', '.join(state_dict.keys())}"
                )

            for key, value_dict in lora_grouped_dict.items():
                attn_processor = self
                for sub_key in key.split("."):
                    attn_processor = getattr(attn_processor, sub_key)

                # Process non-attention layers, which don't have to_{k,v,q,out_proj}_lora layers
                # or add_{k,v,q,out_proj}_proj_lora layers.
                rank = value_dict["lora.down.weight"].shape[0]

                if isinstance(attn_processor, LoRACompatibleConv):
                    in_features = attn_processor.in_channels
                    out_features = attn_processor.out_channels
                    kernel_size = attn_processor.kernel_size

                    ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
                    with ctx():
                        lora = LoRAConv2dLayer(
                            in_features=in_features,
                            out_features=out_features,
                            rank=rank,
                            kernel_size=kernel_size,
                            stride=attn_processor.stride,
                            padding=attn_processor.padding,
                            network_alpha=mapped_network_alphas.get(key),
                        )
                elif isinstance(attn_processor, LoRACompatibleLinear):
                    ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
                    with ctx():
                        lora = LoRALinearLayer(
                            attn_processor.in_features,
                            attn_processor.out_features,
                            rank,
                            mapped_network_alphas.get(key),
                        )
                else:
                    raise ValueError(f"Module {key} is not a LoRACompatibleConv or LoRACompatibleLinear module.")

                value_dict = {k.replace("lora.", ""): v for k, v in value_dict.items()}
                lora_layers_list.append((attn_processor, lora))

                if low_cpu_mem_usage:
                    device = next(iter(value_dict.values())).device
                    dtype = next(iter(value_dict.values())).dtype
                    load_model_dict_into_meta(lora, value_dict, device=device, dtype=dtype)
                else:
                    lora.load_state_dict(value_dict)

        elif is_custom_diffusion:
            attn_processors = {}
            custom_diffusion_grouped_dict = defaultdict(dict)
            for key, value in state_dict.items():
                if len(value) == 0:
                    custom_diffusion_grouped_dict[key] = {}
                else:
                    if "to_out" in key:
                        attn_processor_key, sub_key = ".".join(key.split(".")[:-3]), ".".join(key.split(".")[-3:])
                    else:
                        attn_processor_key, sub_key = ".".join(key.split(".")[:-2]), ".".join(key.split(".")[-2:])
                    custom_diffusion_grouped_dict[attn_processor_key][sub_key] = value

            for key, value_dict in custom_diffusion_grouped_dict.items():
                if len(value_dict) == 0:
                    attn_processors[key] = CustomDiffusionAttnProcessor(
                        train_kv=False, train_q_out=False, hidden_size=None, cross_attention_dim=None
                    )
                else:
                    cross_attention_dim = value_dict["to_k_custom_diffusion.weight"].shape[1]
                    hidden_size = value_dict["to_k_custom_diffusion.weight"].shape[0]
                    train_q_out = "to_q_custom_diffusion.weight" in value_dict
                    attn_processors[key] = CustomDiffusionAttnProcessor(
                        train_kv=True,
                        train_q_out=train_q_out,
                        hidden_size=hidden_size,
                        cross_attention_dim=cross_attention_dim,
                    )
                    attn_processors[key].load_state_dict(value_dict)
        else:
            raise ValueError(
                f"{model_file} does not seem to be in the correct format expected by LoRA or Custom Diffusion training."
            )

        # <Unsafe code
        # We can be sure that the following works as it just sets attention processors, lora layers and puts all in the same dtype
        # Now we remove any existing hooks to
        is_model_cpu_offload = False
        is_sequential_cpu_offload = False
        if _pipeline is not None:
            for _, component in _pipeline.components.items():
                if isinstance(component, nn.Module):
                    if hasattr(component, "_hf_hook"):
                        is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
                        is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
                        logger.info(
                            "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
                        )
                        remove_hook_from_module(component, recurse=is_sequential_cpu_offload)

        # only custom diffusion needs to set attn processors
        if is_custom_diffusion:
            self.set_attn_processor(attn_processors)

        # set lora layers
        for target_module, lora_layer in lora_layers_list:
            target_module.set_lora_layer(lora_layer)

        self.to(dtype=self.dtype, device=self.device)

        # Offload back.
        if is_model_cpu_offload:
            _pipeline.enable_model_cpu_offload()
        elif is_sequential_cpu_offload:
            _pipeline.enable_sequential_cpu_offload()
        # Unsafe code />

    def convert_state_dict_legacy_attn_format(self, state_dict, network_alphas):
        is_new_lora_format = all(
            key.startswith(self.unet_name) or key.startswith(self.text_encoder_name) for key in state_dict.keys()
        )
        if is_new_lora_format:
            # Strip the `"unet"` prefix.
            is_text_encoder_present = any(key.startswith(self.text_encoder_name) for key in state_dict.keys())
            if is_text_encoder_present:
                warn_message = "The state_dict contains LoRA params corresponding to the text encoder which are not being used here. To use both UNet and text encoder related LoRA params, use [`pipe.load_lora_weights()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.load_lora_weights)."
                logger.warning(warn_message)
            unet_keys = [k for k in state_dict.keys() if k.startswith(self.unet_name)]
            state_dict = {k.replace(f"{self.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys}

        # change processor format to 'pure' LoRACompatibleLinear format
        if any("processor" in k.split(".") for k in state_dict.keys()):

            def format_to_lora_compatible(key):
                if "processor" not in key.split("."):
                    return key
                return key.replace(".processor", "").replace("to_out_lora", "to_out.0.lora").replace("_lora", ".lora")

            state_dict = {format_to_lora_compatible(k): v for k, v in state_dict.items()}

            if network_alphas is not None:
                network_alphas = {format_to_lora_compatible(k): v for k, v in network_alphas.items()}
        return state_dict, network_alphas

    def save_attn_procs(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        weight_name: str = None,
        save_function: Callable = None,
        safe_serialization: bool = True,
        **kwargs,
    ):
        r"""
        Save an attention processor to a directory so that it can be reloaded using the
        [`~loaders.UNet2DConditionLoadersMixin.load_attn_procs`] method.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save an attention processor to. Will be created if it doesn't exist.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training when
                you need to call this function on all processes. In this case, set `is_main_process=True` only on the
                main process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
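
        Example (a minimal sketch; the output directory and base model id are illustrative):

        ```py
        import torch
        from diffusers import StableDiffusionPipeline

        model_id = "runwayml/stable-diffusion-v1-5"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)

        # ... train or load attention processor layers into `pipe.unet` here ...

        pipe.unet.save_attn_procs("./my-attn-procs")
        ```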
        """
        from .models.attention_processor import (
            CustomDiffusionAttnProcessor,
            CustomDiffusionAttnProcessor2_0,
            CustomDiffusionXFormersAttnProcessor,
        )

        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        if save_function is None:
            if safe_serialization:

                def save_function(weights, filename):
                    return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"})

            else:
                save_function = torch.save

        os.makedirs(save_directory, exist_ok=True)

        is_custom_diffusion = any(
            isinstance(
                x,
                (CustomDiffusionAttnProcessor, CustomDiffusionAttnProcessor2_0, CustomDiffusionXFormersAttnProcessor),
            )
            for (_, x) in self.attn_processors.items()
        )
        if is_custom_diffusion:
            model_to_save = AttnProcsLayers(
                {
                    y: x
                    for (y, x) in self.attn_processors.items()
                    if isinstance(
                        x,
                        (
                            CustomDiffusionAttnProcessor,
                            CustomDiffusionAttnProcessor2_0,
                            CustomDiffusionXFormersAttnProcessor,
                        ),
                    )
                }
            )
            state_dict = model_to_save.state_dict()
            for name, attn in self.attn_processors.items():
                if len(attn.state_dict()) == 0:
                    state_dict[name] = {}
        else:
            model_to_save = AttnProcsLayers(self.attn_processors)
            state_dict = model_to_save.state_dict()

        if weight_name is None:
            if safe_serialization:
                weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME_SAFE if is_custom_diffusion else LORA_WEIGHT_NAME_SAFE
            else:
                weight_name = CUSTOM_DIFFUSION_WEIGHT_NAME if is_custom_diffusion else LORA_WEIGHT_NAME

        # Save the model
        save_function(state_dict, os.path.join(save_directory, weight_name))
        logger.info(f"Model weights saved in {os.path.join(save_directory, weight_name)}")

    def fuse_lora(self, lora_scale=1.0, safe_fusing=False):
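        r"""
        Fuse the loaded LoRA parameters into the base weights of every submodule that implements `_fuse_lora`.
        `lora_scale` scales the LoRA contribution and `safe_fusing` checks the fused weights for NaN values first.
        """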
        self.lora_scale = lora_scale
        self._safe_fusing = safe_fusing
        self.apply(self._fuse_lora_apply)

    def _fuse_lora_apply(self, module):
        if hasattr(module, "_fuse_lora"):
            module._fuse_lora(self.lora_scale, self._safe_fusing)

    def unfuse_lora(self):
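        r"""
        Reverse `fuse_lora` by subtracting the stored LoRA contribution from the base weights of every
        submodule that implements `_unfuse_lora`.
        """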
        self.apply(self._unfuse_lora_apply)

    def _unfuse_lora_apply(self, module):
        if hasattr(module, "_unfuse_lora"):
            module._unfuse_lora()


def load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs):
    cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
    force_download = kwargs.pop("force_download", False)
    resume_download = kwargs.pop("resume_download", False)
    proxies = kwargs.pop("proxies", None)
    local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
    use_auth_token = kwargs.pop("use_auth_token", None)
    revision = kwargs.pop("revision", None)
    subfolder = kwargs.pop("subfolder", None)
    weight_name = kwargs.pop("weight_name", None)
    use_safetensors = kwargs.pop("use_safetensors", None)

    allow_pickle = False
    if use_safetensors is None:
        use_safetensors = True
        allow_pickle = True

    user_agent = {
        "file_type": "text_inversion",
        "framework": "pytorch",
    }
    state_dicts = []
    for pretrained_model_name_or_path in pretrained_model_name_or_paths:
        if not isinstance(pretrained_model_name_or_path, (dict, torch.Tensor)):
            # 3.1. Load textual inversion file
            model_file = None

            # Let's first try to load .safetensors weights
            if (use_safetensors and weight_name is None) or (
                weight_name is not None and weight_name.endswith(".safetensors")
            ):
                try:
                    model_file = _get_model_file(
                        pretrained_model_name_or_path,
                        weights_name=weight_name or TEXT_INVERSION_NAME_SAFE,
                        cache_dir=cache_dir,
                        force_download=force_download,
                        resume_download=resume_download,
                        proxies=proxies,
                        local_files_only=local_files_only,
                        use_auth_token=use_auth_token,
                        revision=revision,
                        subfolder=subfolder,
                        user_agent=user_agent,
                    )
                    state_dict = safetensors.torch.load_file(model_file, device="cpu")
                except Exception as e:
                    if not allow_pickle:
                        raise e

                    model_file = None

            if model_file is None:
                model_file = _get_model_file(
                    pretrained_model_name_or_path,
                    weights_name=weight_name or TEXT_INVERSION_NAME,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    subfolder=subfolder,
                    user_agent=user_agent,
                )
                state_dict = torch.load(model_file, map_location="cpu")
        else:
            state_dict = pretrained_model_name_or_path

        state_dicts.append(state_dict)

    return state_dicts


class TextualInversionLoaderMixin:
    r"""
    Load textual inversion tokens and embeddings to the tokenizer and text encoder.
    """

    def maybe_convert_prompt(self, prompt: Union[str, List[str]], tokenizer: "PreTrainedTokenizer"):  # noqa: F821
        r"""
        Processes prompts that include a special token corresponding to a multi-vector textual inversion embedding to
        be replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
        inversion token or if the textual inversion token is a single vector, the input prompt is returned.

        Parameters:
            prompt (`str` or list of `str`):
                The prompt or prompts to guide the image generation.
            tokenizer (`PreTrainedTokenizer`):
                The tokenizer responsible for encoding the prompt into input tokens.

        Returns:
            `str` or list of `str`: The converted prompt
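
        Example (illustrative; assumes a multi-vector embedding was loaded under the token `<cat-toy>`):

        ```py
        # If `<cat-toy>` was loaded as a 3-vector embedding, the tokenizer also holds the tokens
        # `<cat-toy>_1` and `<cat-toy>_2`, and the prompt is expanded accordingly:
        prompt = pipe.maybe_convert_prompt("A <cat-toy> backpack", pipe.tokenizer)
        # -> "A <cat-toy> <cat-toy>_1 <cat-toy>_2 backpack"
        ```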
        """
        if not isinstance(prompt, List):
            prompts = [prompt]
        else:
            prompts = prompt

        prompts = [self._maybe_convert_prompt(p, tokenizer) for p in prompts]

        if not isinstance(prompt, List):
            return prompts[0]

        return prompts

    def _maybe_convert_prompt(self, prompt: str, tokenizer: "PreTrainedTokenizer"):  # noqa: F821
        r"""
        Maybe convert a prompt into a "multi vector"-compatible prompt. If the prompt includes a token that corresponds
        to a multi-vector textual inversion embedding, this function will process the prompt so that the special token
        is replaced with multiple special tokens each corresponding to one of the vectors. If the prompt has no textual
        inversion token or a textual inversion token that is a single vector, the input prompt is simply returned.

        Parameters:
            prompt (`str`):
                The prompt to guide the image generation.
            tokenizer (`PreTrainedTokenizer`):
                The tokenizer responsible for encoding the prompt into input tokens.

        Returns:
            `str`: The converted prompt
        """
        tokens = tokenizer.tokenize(prompt)
        unique_tokens = set(tokens)
        for token in unique_tokens:
            if token in tokenizer.added_tokens_encoder:
                replacement = token
                i = 1
                while f"{token}_{i}" in tokenizer.added_tokens_encoder:
                    replacement += f" {token}_{i}"
                    i += 1

                prompt = prompt.replace(token, replacement)

        return prompt

    def _check_text_inv_inputs(self, tokenizer, text_encoder, pretrained_model_name_or_paths, tokens):
        if tokenizer is None:
            raise ValueError(
                f"{self.__class__.__name__} requires `self.tokenizer` or passing a `tokenizer` of type `PreTrainedTokenizer` for calling"
                f" `{self.load_textual_inversion.__name__}`"
            )

        if text_encoder is None:
            raise ValueError(
                f"{self.__class__.__name__} requires `self.text_encoder` or passing a `text_encoder` of type `PreTrainedModel` for calling"
                f" `{self.load_textual_inversion.__name__}`"
            )

        if len(pretrained_model_name_or_paths) != len(tokens):
            raise ValueError(
                f"You have passed a list of models of length {len(pretrained_model_name_or_paths)}, and list of tokens of length {len(tokens)} "
                f"Make sure both lists have the same length."
            )

        valid_tokens = [t for t in tokens if t is not None]
        if len(set(valid_tokens)) < len(valid_tokens):
            raise ValueError(f"You have passed a list of tokens that contains duplicates: {tokens}")

    @staticmethod
    def _retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer):
        all_tokens = []
        all_embeddings = []
        for state_dict, token in zip(state_dicts, tokens):
            if isinstance(state_dict, torch.Tensor):
                if token is None:
                    raise ValueError(
                        "You are trying to load a textual inversion embedding that has been saved as a PyTorch tensor. Make sure to pass the name of the corresponding token in this case: `token=...`."
                    )
                loaded_token = token
                embedding = state_dict
            elif len(state_dict) == 1:
                # diffusers
                loaded_token, embedding = next(iter(state_dict.items()))
            elif "string_to_param" in state_dict:
                # A1111
                loaded_token = state_dict["name"]
                embedding = state_dict["string_to_param"]["*"]
            else:
                raise ValueError(
                    f"Loaded state dictonary is incorrect: {state_dict}. \n\n"
                    "Please verify that the loaded state dictionary of the textual embedding either only has a single key or includes the `string_to_param`"
                    " input key."
                )

            if token is not None and loaded_token != token:
                logger.info(f"The loaded token: {loaded_token} is overwritten by the passed token {token}.")
            else:
                token = loaded_token

            if token in tokenizer.get_vocab():
                raise ValueError(
                    f"Token {token} already in tokenizer vocabulary. Please choose a different token name or remove {token} and embedding from the tokenizer and text encoder."
                )

            all_tokens.append(token)
            all_embeddings.append(embedding)

        return all_tokens, all_embeddings

    @staticmethod
    def _extend_tokens_and_embeddings(tokens, embeddings, tokenizer):
        all_tokens = []
        all_embeddings = []

        for embedding, token in zip(embeddings, tokens):
            if f"{token}_1" in tokenizer.get_vocab():
                multi_vector_tokens = [token]
                i = 1
                while f"{token}_{i}" in tokenizer.added_tokens_encoder:
                    multi_vector_tokens.append(f"{token}_{i}")
                    i += 1

                raise ValueError(
                    f"Multi-vector Token {multi_vector_tokens} already in tokenizer vocabulary. Please choose a different token name or remove the {multi_vector_tokens} and embedding from the tokenizer and text encoder."
                )

            is_multi_vector = len(embedding.shape) > 1 and embedding.shape[0] > 1
            if is_multi_vector:
                all_tokens += [token] + [f"{token}_{i}" for i in range(1, embedding.shape[0])]
                all_embeddings += [e for e in embedding]  # noqa: C416
            else:
                all_tokens += [token]
                all_embeddings += [embedding[0]] if len(embedding.shape) > 1 else [embedding]

        return all_tokens, all_embeddings

    def load_textual_inversion(
        self,
        pretrained_model_name_or_path: Union[str, List[str], Dict[str, torch.Tensor], List[Dict[str, torch.Tensor]]],
        token: Optional[Union[str, List[str]]] = None,
        tokenizer: Optional["PreTrainedTokenizer"] = None,  # noqa: F821
        text_encoder: Optional["PreTrainedModel"] = None,  # noqa: F821
        **kwargs,
    ):
        r"""
        Load textual inversion embeddings into the text encoder of [`StableDiffusionPipeline`] (both 🤗 Diffusers and
        Automatic1111 formats are supported).

        Parameters:
            pretrained_model_name_or_path (`str` or `os.PathLike` or `List[str or os.PathLike]` or `Dict` or `List[Dict]`):
                Can be either one of the following or a list of them:

                    - A string, the *model id* (for example `sd-concepts-library/low-poly-hd-logos-icons`) of a
                      pretrained model hosted on the Hub.
                    - A path to a *directory* (for example `./my_text_inversion_directory/`) containing the textual
                      inversion weights.
                    - A path to a *file* (for example `./my_text_inversions.pt`) containing textual inversion weights.
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            token (`str` or `List[str]`, *optional*):
                Override the token to use for the textual inversion weights. If `pretrained_model_name_or_path` is a
                list, then `token` must also be a list of equal length.
            text_encoder ([`~transformers.CLIPTextModel`], *optional*):
                Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
                If not specified, the function uses `self.text_encoder`.
            tokenizer ([`~transformers.CLIPTokenizer`], *optional*):
                A `CLIPTokenizer` to tokenize text. If not specified, the function uses `self.tokenizer`.
            weight_name (`str`, *optional*):
                Name of a custom weight file. This should be used when:

                    - The saved textual inversion file is in 🤗 Diffusers format, but was saved under a specific weight
                      name such as `text_inv.bin`.
                    - The saved textual inversion file is in the Automatic1111 format.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            mirror (`str`, *optional*):
                Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
                guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
                information.

        Example:

        To load a textual inversion embedding vector in 🤗 Diffusers format:

        ```py
        from diffusers import StableDiffusionPipeline
        import torch

        model_id = "runwayml/stable-diffusion-v1-5"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

        pipe.load_textual_inversion("sd-concepts-library/cat-toy")

        prompt = "A <cat-toy> backpack"

        image = pipe(prompt, num_inference_steps=50).images[0]
        image.save("cat-backpack.png")
        ```

        To load a textual inversion embedding vector in Automatic1111 format, make sure to download the vector first
        (for example from [civitAI](https://civitai.com/models/3036?modelVersionId=9857)) and then load the vector
        locally:

        ```py
        from diffusers import StableDiffusionPipeline
        import torch

        model_id = "runwayml/stable-diffusion-v1-5"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

        pipe.load_textual_inversion("./charturnerv2.pt", token="charturnerv2")

        prompt = "charturnerv2, multiple views of the same character in the same outfit, a character turnaround of a woman wearing a black jacket and red shirt, best quality, intricate details."

        image = pipe(prompt, num_inference_steps=50).images[0]
        image.save("character.png")
        ```

        """
        # 1. Set correct tokenizer and text encoder
        tokenizer = tokenizer or getattr(self, "tokenizer", None)
        text_encoder = text_encoder or getattr(self, "text_encoder", None)

        # 2. Normalize inputs
        pretrained_model_name_or_paths = (
            [pretrained_model_name_or_path]
            if not isinstance(pretrained_model_name_or_path, list)
            else pretrained_model_name_or_path
        )
        tokens = len(pretrained_model_name_or_paths) * [token] if (isinstance(token, str) or token is None) else token

        # 3. Check inputs
        self._check_text_inv_inputs(tokenizer, text_encoder, pretrained_model_name_or_paths, tokens)

        # 4. Load state dicts of textual embeddings
        state_dicts = load_textual_inversion_state_dicts(pretrained_model_name_or_paths, **kwargs)

        # 5. Retrieve tokens and embeddings
        tokens, embeddings = self._retrieve_tokens_and_embeddings(tokens, state_dicts, tokenizer)

        # 6. Extend tokens and embeddings for multi vector
        tokens, embeddings = self._extend_tokens_and_embeddings(tokens, embeddings, tokenizer)

        # 7. Make sure all embeddings have the correct size
        expected_emb_dim = text_encoder.get_input_embeddings().weight.shape[-1]
        if any(expected_emb_dim != emb.shape[-1] for emb in embeddings):
            raise ValueError(
                "Loaded embeddings are of incorrect shape. Expected each textual inversion embedding "
                f"to be of shape {expected_emb_dim}, but got {[emb.shape[-1] for emb in embeddings]} instead."
            )

        # 8. Now we can be sure that loading the embedding matrix works
        # < Unsafe code:

        # 8.1 Offload all hooks in case the pipeline was cpu-offloaded before; we remove them here and add them back after loading
        is_model_cpu_offload = False
        is_sequential_cpu_offload = False
        for _, component in self.components.items():
            if isinstance(component, nn.Module):
                if hasattr(component, "_hf_hook"):
                    is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
                    is_sequential_cpu_offload = isinstance(getattr(component, "_hf_hook"), AlignDevicesHook)
                    logger.info(
                        "Accelerate hooks detected. Since you have called `load_textual_inversion()`, the previous hooks will be first removed. Then the textual inversion parameters will be loaded and the hooks will be applied again."
                    )
                    remove_hook_from_module(component, recurse=is_sequential_cpu_offload)

        # 8.2 save expected device and dtype
        device = text_encoder.device
        dtype = text_encoder.dtype

        # 8.3 Increase token embedding matrix
        text_encoder.resize_token_embeddings(len(tokenizer) + len(tokens))
        input_embeddings = text_encoder.get_input_embeddings().weight

        # 8.4 Load token and embedding
        for token, embedding in zip(tokens, embeddings):
            # add tokens and get ids
            tokenizer.add_tokens(token)
            token_id = tokenizer.convert_tokens_to_ids(token)
            input_embeddings.data[token_id] = embedding
            logger.info(f"Loaded textual inversion embedding for {token}.")

        input_embeddings.to(dtype=dtype, device=device)

        # 8.5 Offload the model again
        if is_model_cpu_offload:
            self.enable_model_cpu_offload()
        elif is_sequential_cpu_offload:
            self.enable_sequential_cpu_offload()

        # / Unsafe Code >


class LoraLoaderMixin:
    r"""
    Load LoRA layers into [`UNet2DConditionModel`] and
    [`CLIPTextModel`](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel).
    """
    text_encoder_name = TEXT_ENCODER_NAME
    unet_name = UNET_NAME
    num_fused_loras = 0
    use_peft_backend = USE_PEFT_BACKEND

    def load_lora_weights(
        self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], adapter_name=None, **kwargs
    ):
        """
        Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
        `self.text_encoder`.

        All kwargs are forwarded to `self.lora_state_dict`.

        See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.

        See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into
        `self.unet`.

        See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded
        into `self.text_encoder`.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
            kwargs (`dict`, *optional*):
                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
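
        Example (a minimal usage sketch; the LoRA repository id and weight file name are placeholders):

        ```py
        from diffusers import StableDiffusionPipeline
        import torch

        model_id = "runwayml/stable-diffusion-v1-5"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

        # hypothetical repository; any Hub repo or local file with a LoRA state dict works
        pipe.load_lora_weights("some-user/some-lora-repo", weight_name="pytorch_lora_weights.safetensors")
        image = pipe("a photo of a dog in a bucket", num_inference_steps=25).images[0]
        ```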
        """
        # First, ensure that the checkpoint is a compatible one and can be successfully loaded.
        state_dict, network_alphas = self.lora_state_dict(pretrained_model_name_or_path_or_dict, **kwargs)

        is_correct_format = all("lora" in key for key in state_dict.keys())
        if not is_correct_format:
            raise ValueError("Invalid LoRA checkpoint.")

        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)

        self.load_lora_into_unet(
            state_dict,
            network_alphas=network_alphas,
            unet=self.unet,
            low_cpu_mem_usage=low_cpu_mem_usage,
            _pipeline=self,
        )
        self.load_lora_into_text_encoder(
            state_dict,
            network_alphas=network_alphas,
            text_encoder=self.text_encoder,
            lora_scale=self.lora_scale,
            low_cpu_mem_usage=low_cpu_mem_usage,
            adapter_name=adapter_name,
            _pipeline=self,
        )

    @classmethod
    def lora_state_dict(
        cls,
        pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]],
        **kwargs,
    ):
        r"""
        Return state dict for lora weights and the network alphas.

        <Tip warning={true}>

        We support loading A1111 formatted LoRA checkpoints in a limited capacity.

        This function is experimental and might change in the future.

        </Tip>

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                Can be either:

                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).

            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            subfolder (`str`, *optional*, defaults to `""`):
                The subfolder location of a model file within a larger model repository on the Hub or locally.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
                tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
                Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
            mirror (`str`, *optional*):
                Mirror source to resolve accessibility issues if you're downloading a model in China. We do not
                guarantee the timeliness or safety of the source, and you should refer to the mirror site for more
                information.
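
        Example (a minimal sketch; the repository id is a placeholder):

        ```py
        from diffusers import StableDiffusionPipeline

        # returns the converted LoRA state dict and, for Kohya-style checkpoints, the network alphas
        state_dict, network_alphas = StableDiffusionPipeline.lora_state_dict("some-user/some-lora-repo")
        ```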

        """
        # Load the main state dict first which has the LoRA layers for either of
        # UNet and text encoder or both.
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        subfolder = kwargs.pop("subfolder", None)
        weight_name = kwargs.pop("weight_name", None)
        unet_config = kwargs.pop("unet_config", None)
        use_safetensors = kwargs.pop("use_safetensors", None)

        allow_pickle = False
        if use_safetensors is None:
            use_safetensors = True
            allow_pickle = True

        user_agent = {
            "file_type": "attn_procs_weights",
            "framework": "pytorch",
        }

        model_file = None
        if not isinstance(pretrained_model_name_or_path_or_dict, dict):
            # Let's first try to load .safetensors weights
            if (use_safetensors and weight_name is None) or (
                weight_name is not None and weight_name.endswith(".safetensors")
            ):
                try:
                    # Here we're relaxing the loading check to enable more Inference API
                    # friendliness where sometimes, it's not at all possible to automatically
                    # determine `weight_name`.
                    if weight_name is None:
                        weight_name = cls._best_guess_weight_name(
                            pretrained_model_name_or_path_or_dict, file_extension=".safetensors"
                        )
                    model_file = _get_model_file(
                        pretrained_model_name_or_path_or_dict,
                        weights_name=weight_name or LORA_WEIGHT_NAME_SAFE,
                        cache_dir=cache_dir,
                        force_download=force_download,
                        resume_download=resume_download,
                        proxies=proxies,
                        local_files_only=local_files_only,
                        use_auth_token=use_auth_token,
                        revision=revision,
                        subfolder=subfolder,
                        user_agent=user_agent,
                    )
                    state_dict = safetensors.torch.load_file(model_file, device="cpu")
                except (IOError, safetensors.SafetensorError) as e:
                    if not allow_pickle:
                        raise e
                    # try loading non-safetensors weights
                    model_file = None

            if model_file is None:
                if weight_name is None:
                    weight_name = cls._best_guess_weight_name(
                        pretrained_model_name_or_path_or_dict, file_extension=".bin"
                    )
                model_file = _get_model_file(
                    pretrained_model_name_or_path_or_dict,
                    weights_name=weight_name or LORA_WEIGHT_NAME,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    resume_download=resume_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                    revision=revision,
                    subfolder=subfolder,
                    user_agent=user_agent,
                )
                state_dict = torch.load(model_file, map_location="cpu")
        else:
            state_dict = pretrained_model_name_or_path_or_dict

        network_alphas = None
        # TODO: replace it with a method from `state_dict_utils`
        if all(
            (
                k.startswith("lora_te_")
                or k.startswith("lora_unet_")
                or k.startswith("lora_te1_")
                or k.startswith("lora_te2_")
            )
            for k in state_dict.keys()
        ):
            # Map SDXL blocks correctly.
            if unet_config is not None:
                # use unet config to remap block numbers
                state_dict = cls._maybe_map_sgm_blocks_to_diffusers(state_dict, unet_config)
            state_dict, network_alphas = cls._convert_kohya_lora_to_diffusers(state_dict)

        return state_dict, network_alphas

    @classmethod
    def _best_guess_weight_name(cls, pretrained_model_name_or_path_or_dict, file_extension=".safetensors"):
        targeted_files = []

        if os.path.isfile(pretrained_model_name_or_path_or_dict):
            return
        elif os.path.isdir(pretrained_model_name_or_path_or_dict):
            targeted_files = [
                f for f in os.listdir(pretrained_model_name_or_path_or_dict) if f.endswith(file_extension)
            ]
        else:
            files_in_repo = model_info(pretrained_model_name_or_path_or_dict).siblings
            targeted_files = [f.rfilename for f in files_in_repo if f.rfilename.endswith(file_extension)]
        if len(targeted_files) == 0:
            return

        # "scheduler" does not correspond to a LoRA checkpoint.
        # "optimizer" does not correspond to a LoRA checkpoint
        # only top-level checkpoints are considered and not the other ones, hence "checkpoint".
        unallowed_substrings = {"scheduler", "optimizer", "checkpoint"}
        targeted_files = list(
            filter(lambda x: all(substring not in x for substring in unallowed_substrings), targeted_files)
        )

        if len(targeted_files) > 1:
            raise ValueError(
                f"Provided path contains more than one weights file in the {file_extension} format. Either specify `weight_name` in `load_lora_weights` or make sure there's only one  `.safetensors` or `.bin` file in  {pretrained_model_name_or_path_or_dict}."
            )
        weight_name = targeted_files[0]
        return weight_name

    @classmethod
    def _maybe_map_sgm_blocks_to_diffusers(cls, state_dict, unet_config, delimiter="_", block_slice_pos=5):
        # 1. get all state_dict_keys
        all_keys = list(state_dict.keys())
        sgm_patterns = ["input_blocks", "middle_block", "output_blocks"]

        # 2. check if needs remapping, if not return original dict
        is_in_sgm_format = False
        for key in all_keys:
            if any(p in key for p in sgm_patterns):
                is_in_sgm_format = True
                break

        if not is_in_sgm_format:
            return state_dict

        # 3. Else remap from SGM patterns
        new_state_dict = {}
        inner_block_map = ["resnets", "attentions", "upsamplers"]

        # Retrieves # of down, mid and up blocks
        input_block_ids, middle_block_ids, output_block_ids = set(), set(), set()

        for layer in all_keys:
            if "text" in layer:
                new_state_dict[layer] = state_dict.pop(layer)
            else:
                layer_id = int(layer.split(delimiter)[:block_slice_pos][-1])
                if sgm_patterns[0] in layer:
                    input_block_ids.add(layer_id)
                elif sgm_patterns[1] in layer:
                    middle_block_ids.add(layer_id)
                elif sgm_patterns[2] in layer:
                    output_block_ids.add(layer_id)
                else:
                    raise ValueError(f"Checkpoint not supported because layer {layer} not supported.")

        input_blocks = {
            layer_id: [key for key in state_dict if f"input_blocks{delimiter}{layer_id}" in key]
            for layer_id in input_block_ids
        }
        middle_blocks = {
            layer_id: [key for key in state_dict if f"middle_block{delimiter}{layer_id}" in key]
            for layer_id in middle_block_ids
        }
        output_blocks = {
            layer_id: [key for key in state_dict if f"output_blocks{delimiter}{layer_id}" in key]
            for layer_id in output_block_ids
        }

        # Rename keys accordingly
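        # SGM numbers its blocks consecutively across the whole UNet, while diffusers groups them per
        # down/up block; each group spans `layers_per_block + 1` SGM blocks, hence the stride below
        # (for input blocks, index 0 is the stem conv, hence the `i - 1`).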
        for i in input_block_ids:
            block_id = (i - 1) // (unet_config.layers_per_block + 1)
            layer_in_block_id = (i - 1) % (unet_config.layers_per_block + 1)

            for key in input_blocks[i]:
                inner_block_id = int(key.split(delimiter)[block_slice_pos])
                inner_block_key = inner_block_map[inner_block_id] if "op" not in key else "downsamplers"
                inner_layers_in_block = str(layer_in_block_id) if "op" not in key else "0"
                new_key = delimiter.join(
                    key.split(delimiter)[: block_slice_pos - 1]
                    + [str(block_id), inner_block_key, inner_layers_in_block]
                    + key.split(delimiter)[block_slice_pos + 1 :]
                )
                new_state_dict[new_key] = state_dict.pop(key)

        for i in middle_block_ids:
            key_part = None
            if i == 0:
                key_part = [inner_block_map[0], "0"]
            elif i == 1:
                key_part = [inner_block_map[1], "0"]
            elif i == 2:
                key_part = [inner_block_map[0], "1"]
            else:
                raise ValueError(f"Invalid middle block id {i}.")

            for key in middle_blocks[i]:
                new_key = delimiter.join(
                    key.split(delimiter)[: block_slice_pos - 1] + key_part + key.split(delimiter)[block_slice_pos:]
                )
                new_state_dict[new_key] = state_dict.pop(key)

        for i in output_block_ids:
            block_id = i // (unet_config.layers_per_block + 1)
            layer_in_block_id = i % (unet_config.layers_per_block + 1)

            for key in output_blocks[i]:
                inner_block_id = int(key.split(delimiter)[block_slice_pos])
                inner_block_key = inner_block_map[inner_block_id]
                inner_layers_in_block = str(layer_in_block_id) if inner_block_id < 2 else "0"
                new_key = delimiter.join(
                    key.split(delimiter)[: block_slice_pos - 1]
                    + [str(block_id), inner_block_key, inner_layers_in_block]
                    + key.split(delimiter)[block_slice_pos + 1 :]
                )
                new_state_dict[new_key] = state_dict.pop(key)

        if len(state_dict) > 0:
            raise ValueError("At this point all state dict entries have to be converted.")

        return new_state_dict

    @classmethod
    def load_lora_into_unet(cls, state_dict, network_alphas, unet, low_cpu_mem_usage=None, _pipeline=None):
        """
        This will load the LoRA layers specified in `state_dict` into `unet`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The keys can either be indexed directly
                into the unet or prefixed with an additional `unet`, which is used to distinguish them from the text
                encoder lora layers.
            network_alphas (`Dict[str, float]`):
                See `LoRALinearLayer` for more details.
            unet (`UNet2DConditionModel`):
                The UNet model to load the LoRA layers into.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
                tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
                Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
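
        Example (a minimal sketch; `pipe` and the repository id are placeholders):

        ```py
        state_dict, network_alphas = LoraLoaderMixin.lora_state_dict("some-user/some-lora-repo")
        LoraLoaderMixin.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=pipe.unet)
        ```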
        """
        low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT
        # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918),
        # then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as
        # their prefixes.
        keys = list(state_dict.keys())

        if all(key.startswith(cls.unet_name) or key.startswith(cls.text_encoder_name) for key in keys):
            # Load the layers corresponding to UNet.
            logger.info(f"Loading {cls.unet_name}.")

            unet_keys = [k for k in keys if k.startswith(cls.unet_name)]
            state_dict = {k.replace(f"{cls.unet_name}.", ""): v for k, v in state_dict.items() if k in unet_keys}

            if network_alphas is not None:
                alpha_keys = [k for k in network_alphas.keys() if k.startswith(cls.unet_name)]
                network_alphas = {
                    k.replace(f"{cls.unet_name}.", ""): v for k, v in network_alphas.items() if k in alpha_keys
                }

        else:
            # Otherwise, we're dealing with the old format. This means the `state_dict` should only
            # contain the module names of the `unet` as its keys WITHOUT any prefix.
            warn_message = "You have saved the LoRA weights using the old format. To convert the old LoRA weights to the new format, you can first load them in a dictionary and then create a new dictionary like the following: `new_state_dict = {f'unet.{module_name}': params for module_name, params in old_state_dict.items()}`."
            logger.warning(warn_message)

        unet.load_attn_procs(
            state_dict, network_alphas=network_alphas, low_cpu_mem_usage=low_cpu_mem_usage, _pipeline=_pipeline
        )

    @classmethod
    def load_lora_into_text_encoder(
        cls,
        state_dict,
        network_alphas,
        text_encoder,
        prefix=None,
        lora_scale=1.0,
        low_cpu_mem_usage=None,
        adapter_name=None,
        _pipeline=None,
    ):
        """
        This will load the LoRA layers specified in `state_dict` into `text_encoder`.

        Parameters:
            state_dict (`dict`):
                A standard state dict containing the lora layer parameters. The key should be prefixed with an
                additional `text_encoder` to distinguish them from the unet lora layers.
            network_alphas (`Dict[str, float]`):
                See `LoRALinearLayer` for more details.
            text_encoder (`CLIPTextModel`):
                The text encoder model to load the LoRA layers into.
            prefix (`str`):
                Expected prefix of the `text_encoder` in the `state_dict`.
            lora_scale (`float`):
                How much to scale the output of the lora linear layer before it is added to the output of the regular
                layer.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by only loading the pretrained weights and not initializing the weights. This also
                tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.
                Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
            adapter_name (`str`, *optional*):
                Adapter name to be used for referencing the loaded adapter model. If not specified, it will use
                `default_{i}` where i is the total number of adapters being loaded.
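
        Example (a minimal sketch; `pipe` and the repository id are placeholders):

        ```py
        state_dict, network_alphas = LoraLoaderMixin.lora_state_dict("some-user/some-lora-repo")
        LoraLoaderMixin.load_lora_into_text_encoder(
            state_dict, network_alphas=network_alphas, text_encoder=pipe.text_encoder
        )
        ```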
        """
        low_cpu_mem_usage = low_cpu_mem_usage if low_cpu_mem_usage is not None else _LOW_CPU_MEM_USAGE_DEFAULT

        # If the serialization format is new (introduced in https://github.com/huggingface/diffusers/pull/2918),
        # then the `state_dict` keys should have `self.unet_name` and/or `self.text_encoder_name` as
        # their prefixes.
        keys = list(state_dict.keys())
        prefix = cls.text_encoder_name if prefix is None else prefix

        # Safe prefix to check with.
        if any(cls.text_encoder_name in key for key in keys):
            # Load the layers corresponding to text encoder and make necessary adjustments.
            text_encoder_keys = [k for k in keys if k.startswith(prefix) and k.split(".")[0] == prefix]
            text_encoder_lora_state_dict = {
                k.replace(f"{prefix}.", ""): v for k, v in state_dict.items() if k in text_encoder_keys
            }

            if len(text_encoder_lora_state_dict) > 0:
                logger.info(f"Loading {prefix}.")
                rank = {}
                text_encoder_lora_state_dict = convert_state_dict_to_diffusers(text_encoder_lora_state_dict)

                if cls.use_peft_backend:
                    # convert state dict
                    text_encoder_lora_state_dict = convert_state_dict_to_peft(text_encoder_lora_state_dict)

                    for name, _ in text_encoder_attn_modules(text_encoder):
                        rank_key = f"{name}.out_proj.lora_B.weight"
                        rank[rank_key] = text_encoder_lora_state_dict[rank_key].shape[1]

                    patch_mlp = any(".mlp." in key for key in text_encoder_lora_state_dict.keys())
                    if patch_mlp:
                        for name, _ in text_encoder_mlp_modules(text_encoder):
                            rank_key_fc1 = f"{name}.fc1.lora_B.weight"
                            rank_key_fc2 = f"{name}.fc2.lora_B.weight"
                            rank[rank_key_fc1] = text_encoder_lora_state_dict[rank_key_fc1].shape[1]
                            rank[rank_key_fc2] = text_encoder_lora_state_dict[rank_key_fc2].shape[1]
                else:
                    for name, _ in text_encoder_attn_modules(text_encoder):
                        rank_key = f"{name}.out_proj.lora_linear_layer.up.weight"
                        rank.update({rank_key: text_encoder_lora_state_dict[rank_key].shape[1]})

                    patch_mlp = any(".mlp." in key for key in text_encoder_lora_state_dict.keys())
                    if patch_mlp:
                        for name, _ in text_encoder_mlp_modules(text_encoder):
                            rank_key_fc1 = f"{name}.fc1.lora_linear_layer.up.weight"
                            rank_key_fc2 = f"{name}.fc2.lora_linear_layer.up.weight"
                            rank[rank_key_fc1] = text_encoder_lora_state_dict[rank_key_fc1].shape[1]
                            rank[rank_key_fc2] = text_encoder_lora_state_dict[rank_key_fc2].shape[1]

                if network_alphas is not None:
                    alpha_keys = [
                        k for k in network_alphas.keys() if k.startswith(prefix) and k.split(".")[0] == prefix
                    ]
                    network_alphas = {
                        k.replace(f"{prefix}.", ""): v for k, v in network_alphas.items() if k in alpha_keys
                    }

                if cls.use_peft_backend:
                    from peft import LoraConfig

                    lora_config_kwargs = get_peft_kwargs(rank, network_alphas, text_encoder_lora_state_dict)

                    lora_config = LoraConfig(**lora_config_kwargs)

                    # adapter_name
                    if adapter_name is None:
                        adapter_name = get_adapter_name(text_encoder)

                    # inject LoRA layers and load the state dict
                    text_encoder.load_adapter(
                        adapter_name=adapter_name,
                        adapter_state_dict=text_encoder_lora_state_dict,
                        peft_config=lora_config,
                    )
                    # scale LoRA layers with `lora_scale`
                    scale_lora_layers(text_encoder, weight=lora_scale)

                    is_model_cpu_offload = False
                    is_sequential_cpu_offload = False
                else:
                    cls._modify_text_encoder(
                        text_encoder,
                        lora_scale,
                        network_alphas,
                        rank=rank,
                        patch_mlp=patch_mlp,
                        low_cpu_mem_usage=low_cpu_mem_usage,
                    )

                    is_pipeline_offloaded = _pipeline is not None and any(
                        isinstance(c, torch.nn.Module) and hasattr(c, "_hf_hook")
                        for c in _pipeline.components.values()
                    )
                    if is_pipeline_offloaded and not low_cpu_mem_usage:
                        low_cpu_mem_usage = True
                        logger.info(
                            f"Pipeline {_pipeline.__class__} is offloaded. Therefore low cpu mem usage loading is forced."
                        )

                    if low_cpu_mem_usage:
                        device = next(iter(text_encoder_lora_state_dict.values())).device
                        dtype = next(iter(text_encoder_lora_state_dict.values())).dtype
                        unexpected_keys = load_model_dict_into_meta(
                            text_encoder, text_encoder_lora_state_dict, device=device, dtype=dtype
                        )
                    else:
                        load_state_dict_results = text_encoder.load_state_dict(
                            text_encoder_lora_state_dict, strict=False
                        )
                        unexpected_keys = load_state_dict_results.unexpected_keys

                    if len(unexpected_keys) != 0:
                        raise ValueError(
                            f"failed to load text encoder state dict, unexpected keys: {load_state_dict_results.unexpected_keys}"
                        )

                    # <Unsafe code
                    # We can be sure that the following works as all we do is change the dtype and device of the text encoder
                    # Now we remove any existing hooks so the LoRA parameters can be loaded; they are re-applied below.
                    is_model_cpu_offload = False
                    is_sequential_cpu_offload = False
                    if _pipeline is not None:
                        for _, component in _pipeline.components.items():
                            if isinstance(component, torch.nn.Module):
                                if hasattr(component, "_hf_hook"):
                                    is_model_cpu_offload = isinstance(getattr(component, "_hf_hook"), CpuOffload)
                                    is_sequential_cpu_offload = isinstance(
                                        getattr(component, "_hf_hook"), AlignDevicesHook
                                    )
                                    logger.info(
                                        "Accelerate hooks detected. Since you have called `load_lora_weights()`, the previous hooks will be first removed. Then the LoRA parameters will be loaded and the hooks will be applied again."
                                    )
                                    remove_hook_from_module(component, recurse=is_sequential_cpu_offload)

                text_encoder.to(device=text_encoder.device, dtype=text_encoder.dtype)

                # Offload back.
                if is_model_cpu_offload:
                    _pipeline.enable_model_cpu_offload()
                elif is_sequential_cpu_offload:
                    _pipeline.enable_sequential_cpu_offload()
                # Unsafe code />

    @property
    def lora_scale(self) -> float:
        # property function that returns the lora scale which can be set at run time by the pipeline.
        # if _lora_scale has not been set, return 1
        return self._lora_scale if hasattr(self, "_lora_scale") else 1.0

    def _remove_text_encoder_monkey_patch(self):
        if self.use_peft_backend:
            remove_method = recurse_remove_peft_layers
        else:
            remove_method = self._remove_text_encoder_monkey_patch_classmethod

        if hasattr(self, "text_encoder"):
            remove_method(self.text_encoder)

            if self.use_peft_backend:
                del self.text_encoder.peft_config
                self.text_encoder._hf_peft_config_loaded = None
        if hasattr(self, "text_encoder_2"):
            remove_method(self.text_encoder_2)
            if self.use_peft_backend:
                del self.text_encoder_2.peft_config
                self.text_encoder_2._hf_peft_config_loaded = None

    @classmethod
    def _remove_text_encoder_monkey_patch_classmethod(cls, text_encoder):
        if version.parse(__version__) > version.parse("0.23"):
            deprecate("_remove_text_encoder_monkey_patch_classmethod", "0.25", LORA_DEPRECATION_MESSAGE)

        for _, attn_module in text_encoder_attn_modules(text_encoder):
            if isinstance(attn_module.q_proj, PatchedLoraProjection):
                attn_module.q_proj.lora_linear_layer = None
                attn_module.k_proj.lora_linear_layer = None
                attn_module.v_proj.lora_linear_layer = None
                attn_module.out_proj.lora_linear_layer = None

        for _, mlp_module in text_encoder_mlp_modules(text_encoder):
            if isinstance(mlp_module.fc1, PatchedLoraProjection):
                mlp_module.fc1.lora_linear_layer = None
                mlp_module.fc2.lora_linear_layer = None

    @classmethod
    def _modify_text_encoder(
        cls,
        text_encoder,
        lora_scale=1,
        network_alphas=None,
        rank: Union[Dict[str, int], int] = 4,
        dtype=None,
        patch_mlp=False,
        low_cpu_mem_usage=False,
    ):
        r"""
        Monkey-patches the forward passes of attention modules of the text encoder.
        """
        if version.parse(__version__) > version.parse("0.23"):
            deprecate("_modify_text_encoder", "0.25", LORA_DEPRECATION_MESSAGE)

        def create_patched_linear_lora(model, network_alpha, rank, dtype, lora_parameters):
            linear_layer = model.regular_linear_layer if isinstance(model, PatchedLoraProjection) else model
            ctx = init_empty_weights if low_cpu_mem_usage else nullcontext
            with ctx():
                model = PatchedLoraProjection(linear_layer, lora_scale, network_alpha, rank, dtype=dtype)

            lora_parameters.extend(model.lora_linear_layer.parameters())
            return model

        # First, remove any monkey-patch that might have been applied before
        cls._remove_text_encoder_monkey_patch_classmethod(text_encoder)

        lora_parameters = []
        network_alphas = {} if network_alphas is None else network_alphas
        is_network_alphas_populated = len(network_alphas) > 0

        for name, attn_module in text_encoder_attn_modules(text_encoder):
            query_alpha = network_alphas.pop(name + ".to_q_lora.down.weight.alpha", None)
            key_alpha = network_alphas.pop(name + ".to_k_lora.down.weight.alpha", None)
            value_alpha = network_alphas.pop(name + ".to_v_lora.down.weight.alpha", None)
            out_alpha = network_alphas.pop(name + ".to_out_lora.down.weight.alpha", None)

            if isinstance(rank, dict):
                current_rank = rank.pop(f"{name}.out_proj.lora_linear_layer.up.weight")
            else:
                current_rank = rank

            attn_module.q_proj = create_patched_linear_lora(
                attn_module.q_proj, query_alpha, current_rank, dtype, lora_parameters
            )
            attn_module.k_proj = create_patched_linear_lora(
                attn_module.k_proj, key_alpha, current_rank, dtype, lora_parameters
            )
            attn_module.v_proj = create_patched_linear_lora(
                attn_module.v_proj, value_alpha, current_rank, dtype, lora_parameters
            )
            attn_module.out_proj = create_patched_linear_lora(
                attn_module.out_proj, out_alpha, current_rank, dtype, lora_parameters
            )

        if patch_mlp:
            for name, mlp_module in text_encoder_mlp_modules(text_encoder):
                fc1_alpha = network_alphas.pop(name + ".fc1.lora_linear_layer.down.weight.alpha", None)
                fc2_alpha = network_alphas.pop(name + ".fc2.lora_linear_layer.down.weight.alpha", None)

                current_rank_fc1 = rank.pop(f"{name}.fc1.lora_linear_layer.up.weight")
                current_rank_fc2 = rank.pop(f"{name}.fc2.lora_linear_layer.up.weight")

                mlp_module.fc1 = create_patched_linear_lora(
                    mlp_module.fc1, fc1_alpha, current_rank_fc1, dtype, lora_parameters
                )
                mlp_module.fc2 = create_patched_linear_lora(
                    mlp_module.fc2, fc2_alpha, current_rank_fc2, dtype, lora_parameters
                )

        if is_network_alphas_populated and len(network_alphas) > 0:
            raise ValueError(
                f"The `network_alphas` has to be empty at this point but has the following keys \n\n {', '.join(network_alphas.keys())}"
            )

        return lora_parameters

    @classmethod
    def save_lora_weights(
        cls,
        save_directory: Union[str, os.PathLike],
        unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
        text_encoder_lora_layers: Dict[str, torch.nn.Module] = None,
        is_main_process: bool = True,
        weight_name: str = None,
        save_function: Callable = None,
        safe_serialization: bool = True,
    ):
        r"""
        Save the LoRA parameters corresponding to the UNet and text encoder.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save LoRA parameters to. Will be created if it doesn't exist.
            unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `unet`.
            text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training when
                you need to call this function on all processes. In this case, set `is_main_process=True` only on the
                main process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
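
        Example (a minimal sketch; the layer dictionaries are assumed to come from your own LoRA training loop):

        ```py
        # `unet_lora_layers` / `text_encoder_lora_layers` are hypothetical flat
        # `{name: tensor}` state dicts produced during training.
        pipe.save_lora_weights(
            save_directory="./my-lora",
            unet_lora_layers=unet_lora_layers,
            text_encoder_lora_layers=text_encoder_lora_layers,
        )
        ```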
        """
        # Create a flat dictionary.
        state_dict = {}

        # Populate the dictionary.
        if unet_lora_layers is not None:
            weights = (
                unet_lora_layers.state_dict() if isinstance(unet_lora_layers, torch.nn.Module) else unet_lora_layers
            )

            unet_lora_state_dict = {f"{self.unet_name}.{module_name}": param for module_name, param in weights.items()}
1858
            state_dict.update(unet_lora_state_dict)

        if text_encoder_lora_layers is not None:
            weights = (
                text_encoder_lora_layers.state_dict()
                if isinstance(text_encoder_lora_layers, torch.nn.Module)
                else text_encoder_lora_layers
            )

            text_encoder_lora_state_dict = {
                f"{cls.text_encoder_name}.{module_name}": param for module_name, param in weights.items()
            }
            state_dict.update(text_encoder_lora_state_dict)

        # Save the model
        cls.write_lora_layers(
            state_dict=state_dict,
            save_directory=save_directory,
            is_main_process=is_main_process,
            weight_name=weight_name,
            save_function=save_function,
            safe_serialization=safe_serialization,
        )

    @staticmethod
    def write_lora_layers(
        state_dict: Dict[str, torch.Tensor],
        save_directory: str,
        is_main_process: bool,
        weight_name: str,
        save_function: Callable,
        safe_serialization: bool,
    ):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return

        if save_function is None:
            if safe_serialization:

                def save_function(weights, filename):
                    return safetensors.torch.save_file(weights, filename, metadata={"format": "pt"})

            else:
                save_function = torch.save

        os.makedirs(save_directory, exist_ok=True)

        if weight_name is None:
            if safe_serialization:
                weight_name = LORA_WEIGHT_NAME_SAFE
            else:
                weight_name = LORA_WEIGHT_NAME

        save_function(state_dict, os.path.join(save_directory, weight_name))
        logger.info(f"Model weights saved in {os.path.join(save_directory, weight_name)}")

    @classmethod
    def _convert_kohya_lora_to_diffusers(cls, state_dict):
        unet_state_dict = {}
        te_state_dict = {}
        te2_state_dict = {}
        network_alphas = {}
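        # Kohya/sd-scripts checkpoints flatten module paths with underscores and group each layer's
        # `lora_down.weight`, `lora_up.weight` and optional `alpha` tensors under a common prefix,
        # e.g. `lora_unet_down_blocks_0_attentions_0_...` (or `lora_unet_input_blocks_...` for SGM-style UNets).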

        # every down weight has a corresponding up weight and potentially an alpha weight
        lora_keys = [k for k in state_dict.keys() if k.endswith("lora_down.weight")]
        for key in lora_keys:
            lora_name = key.split(".")[0]
            lora_name_up = lora_name + ".lora_up.weight"
            lora_name_alpha = lora_name + ".alpha"

            if lora_name.startswith("lora_unet_"):
                diffusers_name = key.replace("lora_unet_", "").replace("_", ".")

                if "input.blocks" in diffusers_name:
                    diffusers_name = diffusers_name.replace("input.blocks", "down_blocks")
                else:
                    diffusers_name = diffusers_name.replace("down.blocks", "down_blocks")

                if "middle.block" in diffusers_name:
                    diffusers_name = diffusers_name.replace("middle.block", "mid_block")
                else:
                    diffusers_name = diffusers_name.replace("mid.block", "mid_block")
                if "output.blocks" in diffusers_name:
                    diffusers_name = diffusers_name.replace("output.blocks", "up_blocks")
                else:
                    diffusers_name = diffusers_name.replace("up.blocks", "up_blocks")

                diffusers_name = diffusers_name.replace("transformer.blocks", "transformer_blocks")
                diffusers_name = diffusers_name.replace("to.q.lora", "to_q_lora")
                diffusers_name = diffusers_name.replace("to.k.lora", "to_k_lora")
                diffusers_name = diffusers_name.replace("to.v.lora", "to_v_lora")
                diffusers_name = diffusers_name.replace("to.out.0.lora", "to_out_lora")
                diffusers_name = diffusers_name.replace("proj.in", "proj_in")
                diffusers_name = diffusers_name.replace("proj.out", "proj_out")
                diffusers_name = diffusers_name.replace("emb.layers", "time_emb_proj")

                # SDXL specificity.
                if "emb" in diffusers_name and "time.emb.proj" not in diffusers_name:
                    pattern = r"\.\d+(?=\D*$)"
                    diffusers_name = re.sub(pattern, "", diffusers_name, count=1)
                if ".in." in diffusers_name:
                    diffusers_name = diffusers_name.replace("in.layers.2", "conv1")
                if ".out." in diffusers_name:
                    diffusers_name = diffusers_name.replace("out.layers.3", "conv2")
                if "downsamplers" in diffusers_name or "upsamplers" in diffusers_name:
                    diffusers_name = diffusers_name.replace("op", "conv")
                if "skip" in diffusers_name:
                    diffusers_name = diffusers_name.replace("skip.connection", "conv_shortcut")

                # LyCORIS specificity.
                if "time.emb.proj" in diffusers_name:
                    diffusers_name = diffusers_name.replace("time.emb.proj", "time_emb_proj")
                if "conv.shortcut" in diffusers_name:
                    diffusers_name = diffusers_name.replace("conv.shortcut", "conv_shortcut")

                # General coverage.
                if "transformer_blocks" in diffusers_name:
                    if "attn1" in diffusers_name or "attn2" in diffusers_name:
                        diffusers_name = diffusers_name.replace("attn1", "attn1.processor")
                        diffusers_name = diffusers_name.replace("attn2", "attn2.processor")
                        unet_state_dict[diffusers_name] = state_dict.pop(key)
                        unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
                    elif "ff" in diffusers_name:
                        unet_state_dict[diffusers_name] = state_dict.pop(key)
                        unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
                elif any(key in diffusers_name for key in ("proj_in", "proj_out")):
                    unet_state_dict[diffusers_name] = state_dict.pop(key)
                    unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
                else:
                    unet_state_dict[diffusers_name] = state_dict.pop(key)
                    unet_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)

            elif lora_name.startswith("lora_te_"):
                diffusers_name = key.replace("lora_te_", "").replace("_", ".")
                diffusers_name = diffusers_name.replace("text.model", "text_model")
                diffusers_name = diffusers_name.replace("self.attn", "self_attn")
                diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
                diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
                diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
                diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
                if "self_attn" in diffusers_name:
                    te_state_dict[diffusers_name] = state_dict.pop(key)
                    te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
                elif "mlp" in diffusers_name:
                    # Be aware that this is the new diffusers convention and the rest of the code might
                    # not utilize it yet.
                    diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
                    te_state_dict[diffusers_name] = state_dict.pop(key)
                    te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)

            # (sayakpaul): Duplicate code. Needs to be cleaned.
            elif lora_name.startswith("lora_te1_"):
                diffusers_name = key.replace("lora_te1_", "").replace("_", ".")
                diffusers_name = diffusers_name.replace("text.model", "text_model")
                diffusers_name = diffusers_name.replace("self.attn", "self_attn")
                diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
                diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
                diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
                diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
                if "self_attn" in diffusers_name:
                    te_state_dict[diffusers_name] = state_dict.pop(key)
                    te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
                elif "mlp" in diffusers_name:
                    # Be aware that this is the new diffusers convention and the rest of the code might
                    # not utilize it yet.
                    diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
                    te_state_dict[diffusers_name] = state_dict.pop(key)
                    te_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)

            # (sayakpaul): Duplicate code. Needs to be cleaned.
            elif lora_name.startswith("lora_te2_"):
                diffusers_name = key.replace("lora_te2_", "").replace("_", ".")
                diffusers_name = diffusers_name.replace("text.model", "text_model")
                diffusers_name = diffusers_name.replace("self.attn", "self_attn")
                diffusers_name = diffusers_name.replace("q.proj.lora", "to_q_lora")
                diffusers_name = diffusers_name.replace("k.proj.lora", "to_k_lora")
                diffusers_name = diffusers_name.replace("v.proj.lora", "to_v_lora")
                diffusers_name = diffusers_name.replace("out.proj.lora", "to_out_lora")
                if "self_attn" in diffusers_name:
                    te2_state_dict[diffusers_name] = state_dict.pop(key)
                    te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)
                elif "mlp" in diffusers_name:
                    # Be aware that this is the new diffusers convention and the rest of the code might
                    # not utilize it yet.
                    diffusers_name = diffusers_name.replace(".lora.", ".lora_linear_layer.")
                    te2_state_dict[diffusers_name] = state_dict.pop(key)
                    te2_state_dict[diffusers_name.replace(".down.", ".up.")] = state_dict.pop(lora_name_up)

            # Rename the alphas so that they can be mapped appropriately.
            if lora_name_alpha in state_dict:
                alpha = state_dict.pop(lora_name_alpha).item()
                if lora_name_alpha.startswith("lora_unet_"):
                    prefix = "unet."
                elif lora_name_alpha.startswith(("lora_te_", "lora_te1_")):
                    prefix = "text_encoder."
                else:
                    prefix = "text_encoder_2."
                new_name = prefix + diffusers_name.split(".lora.")[0] + ".alpha"
                network_alphas.update({new_name: alpha})

        if len(state_dict) > 0:
            raise ValueError(
                f"The following keys have not been correctly be renamed: \n\n {', '.join(state_dict.keys())}"
2061
            )
2062

        logger.info("Kohya-style checkpoint detected.")
        unet_state_dict = {f"{cls.unet_name}.{module_name}": params for module_name, params in unet_state_dict.items()}
        te_state_dict = {
            f"{cls.text_encoder_name}.{module_name}": params for module_name, params in te_state_dict.items()
        }
        te2_state_dict = (
            {f"text_encoder_2.{module_name}": params for module_name, params in te2_state_dict.items()}
            if len(te2_state_dict) > 0
            else None
        )
        if te2_state_dict is not None:
            te_state_dict.update(te2_state_dict)

        new_state_dict = {**unet_state_dict, **te_state_dict}
        return new_state_dict, network_alphas

    def unload_lora_weights(self):
        """
        Unloads the LoRA parameters.

        Examples:

        ```python
        >>> # Assuming `pipeline` is already loaded with the LoRA parameters.
        >>> pipeline.unload_lora_weights()
        >>> ...
        ```
        """
        for _, module in self.unet.named_modules():
            if hasattr(module, "set_lora_layer"):
                module.set_lora_layer(None)

        # Safe to call the following regardless of LoRA.
        self._remove_text_encoder_monkey_patch()

    def fuse_lora(
        self,
        fuse_unet: bool = True,
        fuse_text_encoder: bool = True,
        lora_scale: float = 1.0,
        safe_fusing: bool = False,
    ):
        r"""
        Fuses the LoRA parameters into the original parameters of the corresponding blocks.

        <Tip warning={true}>

        This is an experimental API.

        </Tip>

        Args:
            fuse_unet (`bool`, defaults to `True`): Whether to fuse the UNet LoRA parameters.
            fuse_text_encoder (`bool`, defaults to `True`):
                Whether to fuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
                LoRA parameters then it won't have any effect.
            lora_scale (`float`, defaults to 1.0):
                Controls how much to influence the outputs with the LoRA parameters.
            safe_fusing (`bool`, defaults to `False`):
                Whether to check the fused weights for NaN values before fusing and skip fusing if such values are found.
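
        Examples:

        ```python
        >>> # Assuming `pipeline` already has LoRA weights loaded via `load_lora_weights`;
        >>> # the `lora_scale` value below is only illustrative.
        >>> pipeline.fuse_lora(lora_scale=0.7)
        >>> ...
        >>> # Reverse the fusion once the fused LoRA is no longer needed.
        >>> pipeline.unfuse_lora()
        ```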
        """
        if fuse_unet or fuse_text_encoder:
            self.num_fused_loras += 1
            if self.num_fused_loras > 1:
                logger.warning(
                    "The current API is supported for operating with a single LoRA file. You are trying to load and fuse more than one LoRA, which is not well-supported.",
                )

        if fuse_unet:
            self.unet.fuse_lora(lora_scale, safe_fusing=safe_fusing)

        if self.use_peft_backend:
            from peft.tuners.tuners_utils import BaseTunerLayer

            def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False):
                # TODO(Patrick, Younes): enable "safe" fusing
                for module in text_encoder.modules():
                    if isinstance(module, BaseTunerLayer):
                        if lora_scale != 1.0:
                            module.scale_layer(lora_scale)

                        module.merge()

        else:
            if version.parse(__version__) > version.parse("0.23"):
                deprecate("fuse_text_encoder_lora", "0.25", LORA_DEPRECATION_MESSAGE)

            def fuse_text_encoder_lora(text_encoder, lora_scale=1.0, safe_fusing=False):
                for _, attn_module in text_encoder_attn_modules(text_encoder):
                    if isinstance(attn_module.q_proj, PatchedLoraProjection):
                        attn_module.q_proj._fuse_lora(lora_scale, safe_fusing)
                        attn_module.k_proj._fuse_lora(lora_scale, safe_fusing)
                        attn_module.v_proj._fuse_lora(lora_scale, safe_fusing)
                        attn_module.out_proj._fuse_lora(lora_scale, safe_fusing)

                for _, mlp_module in text_encoder_mlp_modules(text_encoder):
                    if isinstance(mlp_module.fc1, PatchedLoraProjection):
                        mlp_module.fc1._fuse_lora(lora_scale, safe_fusing)
                        mlp_module.fc2._fuse_lora(lora_scale, safe_fusing)

        if fuse_text_encoder:
            if hasattr(self, "text_encoder"):
                fuse_text_encoder_lora(self.text_encoder, lora_scale, safe_fusing)
            if hasattr(self, "text_encoder_2"):
                fuse_text_encoder_lora(self.text_encoder_2, lora_scale, safe_fusing)

    def unfuse_lora(self, unfuse_unet: bool = True, unfuse_text_encoder: bool = True):
        r"""
        Reverses the effect of
        [`pipe.fuse_lora()`](https://huggingface.co/docs/diffusers/main/en/api/loaders#diffusers.loaders.LoraLoaderMixin.fuse_lora).

        <Tip warning={true}>

        This is an experimental API.

        </Tip>

        Args:
            unfuse_unet (`bool`, defaults to `True`): Whether to unfuse the UNet LoRA parameters.
            unfuse_text_encoder (`bool`, defaults to `True`):
                Whether to unfuse the text encoder LoRA parameters. If the text encoder wasn't monkey-patched with the
                LoRA parameters then it won't have any effect.
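
        Examples:

        ```python
        >>> # Assuming `pipeline.fuse_lora()` was called earlier.
        >>> pipeline.unfuse_lora()
        >>> ...
        ```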
        """
        if unfuse_unet:
            self.unet.unfuse_lora()

        if self.use_peft_backend:
            from peft.tuners.tuners_utils import BaseTunerLayer

            def unfuse_text_encoder_lora(text_encoder):
                for module in text_encoder.modules():
                    if isinstance(module, BaseTunerLayer):
                        module.unmerge()

        else:
            if version.parse(__version__) > version.parse("0.23"):
                deprecate("unfuse_text_encoder_lora", "0.25", LORA_DEPRECATION_MESSAGE)

            def unfuse_text_encoder_lora(text_encoder):
                for _, attn_module in text_encoder_attn_modules(text_encoder):
                    if isinstance(attn_module.q_proj, PatchedLoraProjection):
                        attn_module.q_proj._unfuse_lora()
                        attn_module.k_proj._unfuse_lora()
                        attn_module.v_proj._unfuse_lora()
                        attn_module.out_proj._unfuse_lora()

                for _, mlp_module in text_encoder_mlp_modules(text_encoder):
                    if isinstance(mlp_module.fc1, PatchedLoraProjection):
                        mlp_module.fc1._unfuse_lora()
                        mlp_module.fc2._unfuse_lora()

        if unfuse_text_encoder:
            if hasattr(self, "text_encoder"):
                unfuse_text_encoder_lora(self.text_encoder)
            if hasattr(self, "text_encoder_2"):
                unfuse_text_encoder_lora(self.text_encoder_2)

        self.num_fused_loras -= 1

    def set_adapter_for_text_encoder(
        self,
        adapter_names: Union[List[str], str],
        text_encoder: Optional[PreTrainedModel] = None,
        text_encoder_weights: Optional[Union[List[float], float]] = None,
    ):
        """
        Sets the adapter layers for the text encoder.

        Args:
            adapter_names (`List[str]` or `str`):
                The names of the adapters to use.
            text_encoder (`torch.nn.Module`, *optional*):
                The text encoder module to set the adapter layers for. If `None`, it will try to get the `text_encoder`
                attribute.
            text_encoder_weights (`List[float]`, *optional*):
                The weights to use for the text encoder. If `None`, the weights are set to `1.0` for all the adapters.
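
        Examples:

        ```python
        >>> # Assuming adapters named "pixel" and "toy" were loaded beforehand; the names and
        >>> # weights below are placeholders.
        >>> pipeline.set_adapter_for_text_encoder(["pixel", "toy"], text_encoder_weights=[0.5, 1.0])
        ```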
        """
        if not self.use_peft_backend:
            raise ValueError("PEFT backend is required for this method.")

        def process_weights(adapter_names, weights):
            if weights is None:
                weights = [1.0] * len(adapter_names)
            elif isinstance(weights, float):
                weights = [weights]

            if len(adapter_names) != len(weights):
                raise ValueError(
                    f"Length of adapter names {len(adapter_names)} is not equal to the length of the weights {len(weights)}"
                )
            return weights

        adapter_names = [adapter_names] if isinstance(adapter_names, str) else adapter_names
        text_encoder_weights = process_weights(adapter_names, text_encoder_weights)
        text_encoder = text_encoder or getattr(self, "text_encoder", None)
        if text_encoder is None:
            raise ValueError(
                "The pipeline does not have a default `pipe.text_encoder` class. Please make sure to pass a `text_encoder` instead."
            )
        set_weights_and_activate_adapters(text_encoder, adapter_names, text_encoder_weights)

    def disable_lora_for_text_encoder(self, text_encoder: Optional[PreTrainedModel] = None):
        """
        Disables the LoRA layers for the text encoder.

        Args:
            text_encoder (`torch.nn.Module`, *optional*):
                The text encoder module to disable the LoRA layers for. If `None`, it will try to get the
                `text_encoder` attribute.
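
        Examples:

        ```python
        >>> # Assuming `pipeline` has LoRA adapters loaded with the PEFT backend enabled.
        >>> pipeline.disable_lora_for_text_encoder()
        ```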
        """
        if not self.use_peft_backend:
            raise ValueError("PEFT backend is required for this method.")

        text_encoder = text_encoder or getattr(self, "text_encoder", None)
        if text_encoder is None:
            raise ValueError("Text Encoder not found.")
        set_adapter_layers(text_encoder, enabled=False)

    def enable_lora_for_text_encoder(self, text_encoder: Optional[PreTrainedModel] = None):
        """
        Enables the LoRA layers for the text encoder.

        Args:
            text_encoder (`torch.nn.Module`, *optional*):
                The text encoder module to enable the LoRA layers for. If `None`, it will try to get the `text_encoder`
                attribute.
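
        Examples:

        ```python
        >>> # Re-enables the adapters previously turned off with `disable_lora_for_text_encoder`.
        >>> pipeline.enable_lora_for_text_encoder()
        ```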
        """
        if not self.use_peft_backend:
            raise ValueError("PEFT backend is required for this method.")
        text_encoder = text_encoder or getattr(self, "text_encoder", None)
        if text_encoder is None:
            raise ValueError("Text Encoder not found.")
        set_adapter_layers(text_encoder, enabled=True)


class FromSingleFileMixin:
    """
    Load model weights saved in the `.ckpt` or `.safetensors` format into a [`DiffusionPipeline`].
    """

    @classmethod
    def from_ckpt(cls, *args, **kwargs):
        deprecation_message = "The function `from_ckpt` is deprecated in favor of `from_single_file` and will be removed in diffusers v.0.21. Please make sure to use `StableDiffusionPipeline.from_single_file(...)` instead."
        deprecate("from_ckpt", "0.21.0", deprecation_message, standard_warn=False)
        return cls.from_single_file(*args, **kwargs)

    @classmethod
    def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
        r"""
        Instantiate a [`DiffusionPipeline`] from pretrained pipeline weights saved in the `.ckpt` or `.safetensors`
        format. The pipeline is set in evaluation mode (`model.eval()`) by default.

        Parameters:
            pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:
                    - A link to the `.ckpt` file (for example
                      `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
                    - A path to a *file* containing all pipeline weights.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
                dtype is automatically derived from the model's weights.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the safetensors weights are downloaded if they're available **and** if the
                safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
                weights. If set to `False`, safetensors weights are not loaded.
            extract_ema (`bool`, *optional*, defaults to `False`):
                Whether to extract the EMA weights or not. Pass `True` to extract the EMA weights which usually yield
                higher quality images for inference. Non-EMA weights are usually better for continuing finetuning.
            upcast_attention (`bool`, *optional*, defaults to `None`):
                Whether the attention computation should always be upcasted.
            image_size (`int`, *optional*, defaults to 512):
                The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
                Diffusion v2 base model. Use 768 for Stable Diffusion v2.
            prediction_type (`str`, *optional*):
                The prediction type the model was trained on. Use `'epsilon'` for all Stable Diffusion v1 models and
                the Stable Diffusion v2 base model. Use `'v_prediction'` for Stable Diffusion v2.
            num_in_channels (`int`, *optional*, defaults to `None`):
                The number of input channels. If `None`, it is automatically inferred.
            scheduler_type (`str`, *optional*, defaults to `"pndm"`):
                Type of scheduler to use. Should be one of `["pndm", "lms", "heun", "euler", "euler-ancestral", "dpm",
                "ddim"]`.
            load_safety_checker (`bool`, *optional*, defaults to `True`):
                Whether to load the safety checker or not.
            text_encoder ([`~transformers.CLIPTextModel`], *optional*, defaults to `None`):
                An instance of `CLIPTextModel` to use, specifically the
                [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. If this
                parameter is `None`, the function loads a new instance of `CLIPTextModel` by itself if needed.
            vae (`AutoencoderKL`, *optional*, defaults to `None`):
                Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. If
                this parameter is `None`, the function will load a new instance of [`AutoencoderKL`] by itself, if needed.
            tokenizer ([`~transformers.CLIPTokenizer`], *optional*, defaults to `None`):
                An instance of `CLIPTokenizer` to use. If this parameter is `None`, the function loads a new instance
                of `CLIPTokenizer` by itself if needed.
            original_config_file (`str`):
                Path to `.yaml` config file corresponding to the original architecture. If `None`, will be
                automatically inferred by looking for a key that only exists in SD2.0 models.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load and saveable variables (for example the pipeline components of the
                specific pipeline class). The overwritten components are directly passed to the pipeline's `__init__`
                method. See example below for more information.

        Examples:

        ```py
        >>> import torch
        >>> from diffusers import StableDiffusionPipeline

        >>> # Download pipeline from huggingface.co and cache.
        >>> pipeline = StableDiffusionPipeline.from_single_file(
        ...     "https://huggingface.co/WarriorMama777/OrangeMixs/blob/main/Models/AbyssOrangeMix/AbyssOrangeMix.safetensors"
        ... )

        >>> # Download pipeline from local file
        >>> # file is downloaded under ./v1-5-pruned-emaonly.ckpt
        >>> pipeline = StableDiffusionPipeline.from_single_file("./v1-5-pruned-emaonly")

        >>> # Enable float16 and move to GPU
        >>> pipeline = StableDiffusionPipeline.from_single_file(
        ...     "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned-emaonly.ckpt",
        ...     torch_dtype=torch.float16,
        ... )
        >>> pipeline.to("cuda")
        ```
        """
        # import here to avoid circular dependency
        from .pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt

        original_config_file = kwargs.pop("original_config_file", None)
        config_files = kwargs.pop("config_files", None)
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        extract_ema = kwargs.pop("extract_ema", False)
        image_size = kwargs.pop("image_size", None)
        scheduler_type = kwargs.pop("scheduler_type", "pndm")
        num_in_channels = kwargs.pop("num_in_channels", None)
        upcast_attention = kwargs.pop("upcast_attention", None)
        load_safety_checker = kwargs.pop("load_safety_checker", True)
        prediction_type = kwargs.pop("prediction_type", None)
        text_encoder = kwargs.pop("text_encoder", None)
        vae = kwargs.pop("vae", None)
        controlnet = kwargs.pop("controlnet", None)
        tokenizer = kwargs.pop("tokenizer", None)

        torch_dtype = kwargs.pop("torch_dtype", None)

        use_safetensors = kwargs.pop("use_safetensors", None)

        pipeline_name = cls.__name__
        file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
        from_safetensors = file_extension == "safetensors"

        if from_safetensors and use_safetensors is False:
            raise ValueError("Make sure to install `safetensors` with `pip install safetensors`.")

        # TODO: For now we only support stable diffusion
        stable_unclip = None
        model_type = None

        if pipeline_name in [
            "StableDiffusionControlNetPipeline",
            "StableDiffusionControlNetImg2ImgPipeline",
            "StableDiffusionControlNetInpaintPipeline",
        ]:
            from .models.controlnet import ControlNetModel
            from .pipelines.controlnet.multicontrolnet import MultiControlNetModel

            #  list/tuple or a single instance of ControlNetModel or MultiControlNetModel
            if not (
                isinstance(controlnet, (ControlNetModel, MultiControlNetModel))
                or (isinstance(controlnet, (list, tuple)) and isinstance(controlnet[0], ControlNetModel))
            ):
                raise ValueError("ControlNet needs to be passed if loading from ControlNet pipeline.")
        elif "StableDiffusion" in pipeline_name:
            # Model type will be inferred from the checkpoint.
            pass
        elif pipeline_name == "StableUnCLIPPipeline":
            model_type = "FrozenOpenCLIPEmbedder"
            stable_unclip = "txt2img"
        elif pipeline_name == "StableUnCLIPImg2ImgPipeline":
            model_type = "FrozenOpenCLIPEmbedder"
            stable_unclip = "img2img"
        elif pipeline_name == "PaintByExamplePipeline":
            model_type = "PaintByExample"
        elif pipeline_name == "LDMTextToImagePipeline":
            model_type = "LDMTextToImage"
        else:
            raise ValueError(f"Unhandled pipeline class: {pipeline_name}")

        # remove huggingface url
        has_valid_url_prefix = False
        valid_url_prefixes = ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]
        for prefix in valid_url_prefixes:
            if pretrained_model_link_or_path.startswith(prefix):
                pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]
                has_valid_url_prefix = True

        # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
        ckpt_path = Path(pretrained_model_link_or_path)
        if not ckpt_path.is_file():
            if not has_valid_url_prefix:
                raise ValueError(
                    f"The provided path is either not a file or a valid huggingface URL was not provided. Valid URLs begin with {', '.join(valid_url_prefixes)}"
                )

            # get repo_id and (potentially nested) file path of ckpt in repo
            repo_id = "/".join(ckpt_path.parts[:2])
            file_path = "/".join(ckpt_path.parts[2:])

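            # Hub URLs of the form `<repo_id>/blob/main/<path>` contain web-viewer segments;
            # strip them so `file_path` is the file's actual path inside the repo.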
            if file_path.startswith("blob/"):
                file_path = file_path[len("blob/") :]

            if file_path.startswith("main/"):
                file_path = file_path[len("main/") :]

            pretrained_model_link_or_path = hf_hub_download(
                repo_id,
                filename=file_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                force_download=force_download,
            )

        pipe = download_from_original_stable_diffusion_ckpt(
            pretrained_model_link_or_path,
            pipeline_class=cls,
            model_type=model_type,
            stable_unclip=stable_unclip,
            controlnet=controlnet,
            from_safetensors=from_safetensors,
            extract_ema=extract_ema,
            image_size=image_size,
            scheduler_type=scheduler_type,
            num_in_channels=num_in_channels,
            upcast_attention=upcast_attention,
            load_safety_checker=load_safety_checker,
            prediction_type=prediction_type,
            text_encoder=text_encoder,
            vae=vae,
            tokenizer=tokenizer,
            original_config_file=original_config_file,
            config_files=config_files,
        )

        if torch_dtype is not None:
            pipe.to(torch_dtype=torch_dtype)

        return pipe


class FromOriginalVAEMixin:
    @classmethod
    def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
        r"""
        Instantiate an [`AutoencoderKL`] from pretrained VAE weights saved in the original `.ckpt` or
        `.safetensors` format. The model is set in evaluation mode (`model.eval()`) by default.

        Parameters:
            pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:
                    - A link to the `.ckpt` file (for example
                      `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
                    - A path to a *file* containing all pipeline weights.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
                dtype is automatically derived from the model's weights.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            image_size (`int`, *optional*, defaults to 512):
                The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
                Diffusion v2 base model. Use 768 for Stable Diffusion v2.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the safetensors weights are downloaded if they're available **and** if the
                safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
                weights. If set to `False`, safetensors weights are not loaded.
            upcast_attention (`bool`, *optional*, defaults to `None`):
                Whether the attention computation should always be upcasted.
            scaling_factor (`float`, *optional*, defaults to 0.18215):
                The component-wise standard deviation of the trained latent space computed using the first batch of the
                training set. This is used to scale the latent space to have unit variance when training the diffusion
                model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
                diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z
                = 1 / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution
                Image Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load and saveable variables (for example the pipeline components of the
                specific pipeline class). The overwritten components are directly passed to the pipeline's `__init__`
                method. See example below for more information.

        <Tip warning={true}>

            Make sure to pass both `image_size` and `scaling_factor` to `from_single_file()` if you want to load
            a VAE that accompanies a Stable Diffusion v2 (or higher) or SDXL model.

        </Tip>

        Examples:

        ```py
        from diffusers import AutoencoderKL

        url = "https://huggingface.co/stabilityai/sd-vae-ft-mse-original/blob/main/vae-ft-mse-840000-ema-pruned.safetensors"  # can also be local file
        model = AutoencoderKL.from_single_file(url)
        ```
        """
        if not is_omegaconf_available():
            raise ValueError(BACKENDS_MAPPING["omegaconf"][1])

        from omegaconf import OmegaConf

        from .models import AutoencoderKL

        # import here to avoid circular dependency
        from .pipelines.stable_diffusion.convert_from_ckpt import (
            convert_ldm_vae_checkpoint,
            create_vae_diffusers_config,
        )

        config_file = kwargs.pop("config_file", None)
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        image_size = kwargs.pop("image_size", None)
        scaling_factor = kwargs.pop("scaling_factor", None)
        kwargs.pop("upcast_attention", None)

        torch_dtype = kwargs.pop("torch_dtype", None)

        use_safetensors = kwargs.pop("use_safetensors", None)

        file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
        from_safetensors = file_extension == "safetensors"

        if from_safetensors and use_safetensors is False:
            raise ValueError(
                "The checkpoint is in the `.safetensors` format, but `use_safetensors=False` was passed. Either set "
                "`use_safetensors=True` or pass a non-safetensors checkpoint."
            )

        # remove huggingface url
        for prefix in ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]:
            if pretrained_model_link_or_path.startswith(prefix):
                pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]

        # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
        ckpt_path = Path(pretrained_model_link_or_path)
        if not ckpt_path.is_file():
            # get repo_id and (potentially nested) file path of ckpt in repo
            repo_id = "/".join(ckpt_path.parts[:2])
            file_path = "/".join(ckpt_path.parts[2:])

            if file_path.startswith("blob/"):
                file_path = file_path[len("blob/") :]

            if file_path.startswith("main/"):
                file_path = file_path[len("main/") :]

            pretrained_model_link_or_path = hf_hub_download(
                repo_id,
                filename=file_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                force_download=force_download,
            )

        if from_safetensors:
            from safetensors import safe_open

            checkpoint = {}
            with safe_open(pretrained_model_link_or_path, framework="pt", device="cpu") as f:
                for key in f.keys():
                    checkpoint[key] = f.get_tensor(key)
        else:
            checkpoint = torch.load(pretrained_model_link_or_path, map_location="cpu")

        if "state_dict" in checkpoint:
            checkpoint = checkpoint["state_dict"]

        if config_file is None:
            config_url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
            config_file = BytesIO(requests.get(config_url).content)

        original_config = OmegaConf.load(config_file)

        # default to sd-v1-5
        image_size = image_size or 512

        vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
        converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)

        if scaling_factor is None:
            if (
                "model" in original_config
                and "params" in original_config.model
                and "scale_factor" in original_config.model.params
            ):
                vae_scaling_factor = original_config.model.params.scale_factor
            else:
                vae_scaling_factor = 0.18215  # default SD scaling factor
        else:
            vae_scaling_factor = scaling_factor

        vae_config["scaling_factor"] = vae_scaling_factor

        ctx = init_empty_weights if is_accelerate_available() else nullcontext
        with ctx():
            vae = AutoencoderKL(**vae_config)

        if is_accelerate_available():
            load_model_dict_into_meta(vae, converted_vae_checkpoint, device="cpu")
        else:
            vae.load_state_dict(converted_vae_checkpoint)

        if torch_dtype is not None:
            vae.to(dtype=torch_dtype)

        return vae


class FromOriginalControlnetMixin:
    @classmethod
    def from_single_file(cls, pretrained_model_link_or_path, **kwargs):
        r"""
        Instantiate a [`ControlNetModel`] from pretrained controlnet weights saved in the original `.ckpt` or
        `.safetensors` format. The pipeline is set in evaluation mode (`model.eval()`) by default.

        Parameters:
            pretrained_model_link_or_path (`str` or `os.PathLike`, *optional*):
                Can be either:
                    - A link to the `.ckpt` file (for example
                      `"https://huggingface.co/<repo_id>/blob/main/<path_to_file>.ckpt"`) on the Hub.
                    - A path to a *file* containing all pipeline weights.
            torch_dtype (`str` or `torch.dtype`, *optional*):
                Override the default `torch.dtype` and load the model with another dtype. If `"auto"` is passed, the
                dtype is automatically derived from the model's weights.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            resume_download (`bool`, *optional*, defaults to `False`):
                Whether or not to resume downloading the model weights and configuration files. If set to `False`, any
                incompletely downloaded files are deleted.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            use_auth_token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            use_safetensors (`bool`, *optional*, defaults to `None`):
                If set to `None`, the safetensors weights are downloaded if they're available **and** if the
                safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors
                weights. If set to `False`, safetensors weights are not loaded.
            image_size (`int`, *optional*, defaults to 512):
                The image size the model was trained on. Use 512 for all Stable Diffusion v1 models and the Stable
                Diffusion v2 base model. Use 768 for Stable Diffusion v2.
            upcast_attention (`bool`, *optional*, defaults to `None`):
                Whether the attention computation should always be upcasted.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to overwrite load and saveable variables (for example the pipeline components of the
                specific pipeline class). The overwritten components are directly passed to the pipeline's `__init__`
                method. See example below for more information.

        Examples:

        ```py
        from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

        url = "https://huggingface.co/lllyasviel/ControlNet-v1-1/blob/main/control_v11p_sd15_canny.pth"  # can also be a local path
        controlnet = ControlNetModel.from_single_file(url)

        url = "https://huggingface.co/runwayml/stable-diffusion-v1-5/blob/main/v1-5-pruned.safetensors"  # can also be a local path
        pipe = StableDiffusionControlNetPipeline.from_single_file(url, controlnet=controlnet)
        ```
        """
        # import here to avoid circular dependency
        from .pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt

        config_file = kwargs.pop("config_file", None)
        cache_dir = kwargs.pop("cache_dir", DIFFUSERS_CACHE)
        resume_download = kwargs.pop("resume_download", False)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", HF_HUB_OFFLINE)
        use_auth_token = kwargs.pop("use_auth_token", None)
        num_in_channels = kwargs.pop("num_in_channels", None)
        use_linear_projection = kwargs.pop("use_linear_projection", None)
        revision = kwargs.pop("revision", None)
        extract_ema = kwargs.pop("extract_ema", False)
        image_size = kwargs.pop("image_size", None)
        upcast_attention = kwargs.pop("upcast_attention", None)

        torch_dtype = kwargs.pop("torch_dtype", None)

        use_safetensors = kwargs.pop("use_safetensors", None)

        file_extension = pretrained_model_link_or_path.rsplit(".", 1)[-1]
        from_safetensors = file_extension == "safetensors"

        if from_safetensors and use_safetensors is False:
            raise ValueError(
                "The checkpoint is in the `.safetensors` format, but `use_safetensors=False` was passed. Either set "
                "`use_safetensors=True` or pass a non-safetensors checkpoint."
            )

        # remove huggingface url
        for prefix in ["https://huggingface.co/", "huggingface.co/", "hf.co/", "https://hf.co/"]:
            if pretrained_model_link_or_path.startswith(prefix):
                pretrained_model_link_or_path = pretrained_model_link_or_path[len(prefix) :]

        # Code based on diffusers.pipelines.pipeline_utils.DiffusionPipeline.from_pretrained
        ckpt_path = Path(pretrained_model_link_or_path)
        if not ckpt_path.is_file():
            # get repo_id and (potentially nested) file path of ckpt in repo
            repo_id = "/".join(ckpt_path.parts[:2])
            file_path = "/".join(ckpt_path.parts[2:])

            if file_path.startswith("blob/"):
                file_path = file_path[len("blob/") :]

            if file_path.startswith("main/"):
                file_path = file_path[len("main/") :]

            pretrained_model_link_or_path = hf_hub_download(
                repo_id,
                filename=file_path,
                cache_dir=cache_dir,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                force_download=force_download,
            )

        if config_file is None:
            config_url = "https://raw.githubusercontent.com/lllyasviel/ControlNet/main/models/cldm_v15.yaml"
            config_file = BytesIO(requests.get(config_url).content)

        image_size = image_size or 512

        controlnet = download_controlnet_from_original_ckpt(
            pretrained_model_link_or_path,
            original_config_file=config_file,
            image_size=image_size,
            extract_ema=extract_ema,
            num_in_channels=num_in_channels,
            upcast_attention=upcast_attention,
            from_safetensors=from_safetensors,
            use_linear_projection=use_linear_projection,
        )

        if torch_dtype is not None:
            controlnet.to(torch_dtype=torch_dtype)

        return controlnet


class StableDiffusionXLLoraLoaderMixin(LoraLoaderMixin):
    """This class overrides `LoraLoaderMixin` with LoRA loading/saving code that's specific to SDXL"""

    # Override to properly handle the loading and unloading of the additional text encoder.
    def load_lora_weights(self, pretrained_model_name_or_path_or_dict: Union[str, Dict[str, torch.Tensor]], **kwargs):
        """
        Load LoRA weights specified in `pretrained_model_name_or_path_or_dict` into `self.unet` and
        `self.text_encoder`.

        All kwargs are forwarded to `self.lora_state_dict`.

        See [`~loaders.LoraLoaderMixin.lora_state_dict`] for more details on how the state dict is loaded.

        See [`~loaders.LoraLoaderMixin.load_lora_into_unet`] for more details on how the state dict is loaded into
        `self.unet`.

        See [`~loaders.LoraLoaderMixin.load_lora_into_text_encoder`] for more details on how the state dict is loaded
        into `self.text_encoder`.

        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `os.PathLike` or `dict`):
                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
            kwargs (`dict`, *optional*):
                See [`~loaders.LoraLoaderMixin.lora_state_dict`].
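
        Examples:

        ```python
        >>> # A minimal sketch; the repository ids below are placeholders.
        >>> from diffusers import StableDiffusionXLPipeline
        >>> pipe = StableDiffusionXLPipeline.from_pretrained("<base-model-repo>")
        >>> pipe.load_lora_weights("<lora-repo-or-file>", weight_name="pytorch_lora_weights.safetensors")
        ```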
        """
        # We could have accessed the unet config from `lora_state_dict()` too. We pass
        # it here explicitly to be able to tell that it's coming from an SDXL
        # pipeline.

        # First, ensure that the checkpoint is a compatible one and can be successfully loaded.
        state_dict, network_alphas = self.lora_state_dict(
            pretrained_model_name_or_path_or_dict,
            unet_config=self.unet.config,
            **kwargs,
        )
        is_correct_format = all("lora" in key for key in state_dict.keys())
        if not is_correct_format:
            raise ValueError("Invalid LoRA checkpoint.")

        self.load_lora_into_unet(state_dict, network_alphas=network_alphas, unet=self.unet, _pipeline=self)
        text_encoder_state_dict = {k: v for k, v in state_dict.items() if "text_encoder." in k}
        if len(text_encoder_state_dict) > 0:
            self.load_lora_into_text_encoder(
                text_encoder_state_dict,
                network_alphas=network_alphas,
                text_encoder=self.text_encoder,
                prefix="text_encoder",
                lora_scale=self.lora_scale,
                _pipeline=self,
            )

        text_encoder_2_state_dict = {k: v for k, v in state_dict.items() if "text_encoder_2." in k}
        if len(text_encoder_2_state_dict) > 0:
            self.load_lora_into_text_encoder(
                text_encoder_2_state_dict,
                network_alphas=network_alphas,
                text_encoder=self.text_encoder_2,
                prefix="text_encoder_2",
                lora_scale=self.lora_scale,
                _pipeline=self,
            )

    @classmethod
    def save_lora_weights(
        cls,
        save_directory: Union[str, os.PathLike],
        unet_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
        text_encoder_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
        text_encoder_2_lora_layers: Dict[str, Union[torch.nn.Module, torch.Tensor]] = None,
        is_main_process: bool = True,
        weight_name: str = None,
        save_function: Callable = None,
        safe_serialization: bool = True,
    ):
        r"""
        Save the LoRA parameters corresponding to the UNet and text encoder.

        Arguments:
            save_directory (`str` or `os.PathLike`):
                Directory to save LoRA parameters to. Will be created if it doesn't exist.
            unet_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `unet`.
            text_encoder_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            text_encoder_2_lora_layers (`Dict[str, torch.nn.Module]` or `Dict[str, torch.Tensor]`):
                State dict of the LoRA layers corresponding to the `text_encoder_2`. Must explicitly pass the text
                encoder LoRA state dict because it comes from 🤗 Transformers.
            is_main_process (`bool`, *optional*, defaults to `True`):
                Whether the process calling this is the main process or not. Useful during distributed training and you
                need to call this function on all processes. In this case, set `is_main_process=True` only on the main
                process to avoid race conditions.
            save_function (`Callable`):
                The function to use to save the state dictionary. Useful during distributed training when you need to
                replace `torch.save` with another method. Can be configured with the environment variable
                `DIFFUSERS_SAVE_MODE`.
            safe_serialization (`bool`, *optional*, defaults to `True`):
                Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.
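
        Examples:

        ```python
        >>> # A minimal sketch; `unet_lora_layers`, `text_encoder_one_lora_layers` and
        >>> # `text_encoder_two_lora_layers` are placeholder state dicts collected during training.
        >>> StableDiffusionXLPipeline.save_lora_weights(
        ...     save_directory="sdxl-lora",
        ...     unet_lora_layers=unet_lora_layers,
        ...     text_encoder_lora_layers=text_encoder_one_lora_layers,
        ...     text_encoder_2_lora_layers=text_encoder_two_lora_layers,
        ... )
        ```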
        """
        state_dict = {}

        def pack_weights(layers, prefix):
            layers_weights = layers.state_dict() if isinstance(layers, torch.nn.Module) else layers
            layers_state_dict = {f"{prefix}.{module_name}": param for module_name, param in layers_weights.items()}
            return layers_state_dict

        if not (unet_lora_layers or text_encoder_lora_layers or text_encoder_2_lora_layers):
            raise ValueError(
                "You must pass at least one of `unet_lora_layers`, `text_encoder_lora_layers` or `text_encoder_2_lora_layers`."
            )

        if unet_lora_layers:
            state_dict.update(pack_weights(unet_lora_layers, "unet"))

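        # SDXL packs the LoRA layers of both text encoders together; if only one of the two is
        # passed, neither text encoder state dict is saved.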
        if text_encoder_lora_layers and text_encoder_2_lora_layers:
            state_dict.update(pack_weights(text_encoder_lora_layers, "text_encoder"))
            state_dict.update(pack_weights(text_encoder_2_lora_layers, "text_encoder_2"))

        cls.write_lora_layers(
            state_dict=state_dict,
            save_directory=save_directory,
            is_main_process=is_main_process,
            weight_name=weight_name,
            save_function=save_function,
            safe_serialization=safe_serialization,
        )

    def _remove_text_encoder_monkey_patch(self):
        if self.use_peft_backend:
            recurse_remove_peft_layers(self.text_encoder)
            # TODO: @younesbelkada handle this in transformers side
            del self.text_encoder.peft_config
            self.text_encoder._hf_peft_config_loaded = None

            recurse_remove_peft_layers(self.text_encoder_2)

            del self.text_encoder_2.peft_config
            self.text_encoder_2._hf_peft_config_loaded = None
        else:
            self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder)
            self._remove_text_encoder_monkey_patch_classmethod(self.text_encoder_2)