# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from pathlib import Path
from typing import Dict, List, Optional, Union

import torch
import torch.nn.functional as F
from huggingface_hub.utils import validate_hf_hub_args
from safetensors import safe_open

from ..models.modeling_utils import _LOW_CPU_MEM_USAGE_DEFAULT, load_state_dict
from ..utils import (
    USE_PEFT_BACKEND,
    _get_model_file,
    is_accelerate_available,
    is_torch_version,
    is_transformers_available,
    logging,
)
from .unet_loader_utils import _maybe_expand_lora_scales


if is_transformers_available():
    from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

    from ..models.attention_processor import (
        AttnProcessor,
        AttnProcessor2_0,
        IPAdapterAttnProcessor,
        IPAdapterAttnProcessor2_0,
        IPAdapterXFormersAttnProcessor,
    )

logger = logging.get_logger(__name__)


class IPAdapterMixin:
    """Mixin for handling IP Adapters."""

    @validate_hf_hub_args
    def load_ip_adapter(
        self,
        pretrained_model_name_or_path_or_dict: Union[str, List[str], Dict[str, torch.Tensor]],
        subfolder: Union[str, List[str]],
        weight_name: Union[str, List[str]],
        image_encoder_folder: Optional[str] = "image_encoder",
        **kwargs,
    ):
        """
        Parameters:
            pretrained_model_name_or_path_or_dict (`str` or `List[str]` or `os.PathLike` or `List[os.PathLike]` or `dict` or `List[dict]`):
                Can be either:

                    - A string, the *model id* (for example `google/ddpm-celebahq-256`) of a pretrained model hosted on
                      the Hub.
                    - A path to a *directory* (for example `./my_model_directory`) containing the model weights saved
                      with [`ModelMixin.save_pretrained`].
                    - A [torch state
                      dict](https://pytorch.org/tutorials/beginner/saving_loading_models.html#what-is-a-state-dict).
            subfolder (`str` or `List[str]`):
                The subfolder location of a model file within a larger model repository on the Hub or locally. If a
                list is passed, it should have the same length as `weight_name`.
            weight_name (`str` or `List[str]`):
                The name of the weight file to load. If a list is passed, it should have the same length as
                `subfolder`.
            image_encoder_folder (`str`, *optional*, defaults to `image_encoder`):
                The subfolder location of the image encoder within a larger model repository on the Hub or locally.
                Pass `None` to not load the image encoder. If the image encoder is located in a folder inside
                `subfolder`, you only need to pass the name of the folder that contains image encoder weights, e.g.
                `image_encoder_folder="image_encoder"`. If the image encoder is located in a folder other than
                `subfolder`, you should pass the path to the folder that contains image encoder weights, for example,
                `image_encoder_folder="different_subfolder/image_encoder"`.
            cache_dir (`Union[str, os.PathLike]`, *optional*):
                Path to a directory where a downloaded pretrained model configuration is cached if the standard cache
                is not used.
            force_download (`bool`, *optional*, defaults to `False`):
                Whether or not to force the (re-)download of the model weights and configuration files, overriding the
                cached versions if they exist.
            proxies (`Dict[str, str]`, *optional*):
                A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',
                'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
            local_files_only (`bool`, *optional*, defaults to `False`):
                Whether to only load local model weights and configuration files or not. If set to `True`, the model
                won't be downloaded from the Hub.
            token (`str` or *bool*, *optional*):
                The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from
                `diffusers-cli login` (stored in `~/.huggingface`) is used.
            revision (`str`, *optional*, defaults to `"main"`):
                The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier
                allowed by Git.
            low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):
                Speed up model loading by only loading the pretrained weights and not initializing the weights. This
                also tries not to use more than 1x model size in CPU memory (including peak memory) while loading the
                model. Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this
                argument to `True` will raise an error.
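
        Example (a minimal sketch; the repository id, `subfolder`, and `weight_name` below are illustrative and must
        match the layout of an actual IP-Adapter checkpoint):

        ```py
        import torch
        from diffusers import AutoPipelineForText2Image

        pipeline = AutoPipelineForText2Image.from_pretrained(
            "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
        )
        pipeline.load_ip_adapter("h94/IP-Adapter", subfolder="models", weight_name="ip-adapter_sd15.bin")
        ```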
        """

        # handle the list inputs for multiple IP Adapters
        if not isinstance(weight_name, list):
            weight_name = [weight_name]

        if not isinstance(pretrained_model_name_or_path_or_dict, list):
            pretrained_model_name_or_path_or_dict = [pretrained_model_name_or_path_or_dict]
        if len(pretrained_model_name_or_path_or_dict) == 1:
            pretrained_model_name_or_path_or_dict = pretrained_model_name_or_path_or_dict * len(weight_name)

        if not isinstance(subfolder, list):
            subfolder = [subfolder]
        if len(subfolder) == 1:
            subfolder = subfolder * len(weight_name)

        if len(weight_name) != len(pretrained_model_name_or_path_or_dict):
            raise ValueError("`weight_name` and `pretrained_model_name_or_path_or_dict` must have the same length.")

        if len(weight_name) != len(subfolder):
            raise ValueError("`weight_name` and `subfolder` must have the same length.")

        # Load the main state dict first.
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        proxies = kwargs.pop("proxies", None)
        local_files_only = kwargs.pop("local_files_only", None)
        token = kwargs.pop("token", None)
        revision = kwargs.pop("revision", None)
        low_cpu_mem_usage = kwargs.pop("low_cpu_mem_usage", _LOW_CPU_MEM_USAGE_DEFAULT)

        if low_cpu_mem_usage and not is_accelerate_available():
            low_cpu_mem_usage = False
            logger.warning(
                "Cannot initialize model with low cpu memory usage because `accelerate` was not found in the"
                " environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install"
                " `accelerate` for faster and less memory-intense model loading. You can do so with: \n```\npip"
                " install accelerate\n```\n."
            )

        if low_cpu_mem_usage is True and not is_torch_version(">=", "1.9.0"):
            raise NotImplementedError(
                "Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set"
                " `low_cpu_mem_usage=False`."
            )

        user_agent = {
            "file_type": "attn_procs_weights",
            "framework": "pytorch",
        }
        state_dicts = []
        for pretrained_model_name_or_path_or_dict, weight_name, subfolder in zip(
            pretrained_model_name_or_path_or_dict, weight_name, subfolder
        ):
            if not isinstance(pretrained_model_name_or_path_or_dict, dict):
                model_file = _get_model_file(
                    pretrained_model_name_or_path_or_dict,
                    weights_name=weight_name,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    local_files_only=local_files_only,
                    token=token,
                    revision=revision,
                    subfolder=subfolder,
                    user_agent=user_agent,
                )
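                # IP-Adapter checkpoints store two flat key namespaces, `image_proj.*` and
                # `ip_adapter.*`; for safetensors files, split them here into the nested
                # {"image_proj": ..., "ip_adapter": ...} layout the rest of the loader expects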
                if weight_name.endswith(".safetensors"):
                    state_dict = {"image_proj": {}, "ip_adapter": {}}
                    with safe_open(model_file, framework="pt", device="cpu") as f:
                        for key in f.keys():
                            if key.startswith("image_proj."):
                                state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
                            elif key.startswith("ip_adapter."):
                                state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
                else:
                    state_dict = load_state_dict(model_file)
            else:
                state_dict = pretrained_model_name_or_path_or_dict

            keys = list(state_dict.keys())
            if keys != ["image_proj", "ip_adapter"]:
                raise ValueError("Required keys are (`image_proj` and `ip_adapter`) missing from the state dict.")

            state_dicts.append(state_dict)

            # load CLIP image encoder here if it has not been registered to the pipeline yet
            if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is None:
                if image_encoder_folder is not None:
                    if not isinstance(pretrained_model_name_or_path_or_dict, dict):
                        logger.info(f"loading image_encoder from {pretrained_model_name_or_path_or_dict}")
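                        # a bare folder name is resolved inside `subfolder`; a path that
                        # contains "/" is treated as relative to the repository root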
                        if image_encoder_folder.count("/") == 0:
                            image_encoder_subfolder = Path(subfolder, image_encoder_folder).as_posix()
                        else:
                            image_encoder_subfolder = Path(image_encoder_folder).as_posix()

                        image_encoder = CLIPVisionModelWithProjection.from_pretrained(
                            pretrained_model_name_or_path_or_dict,
                            subfolder=image_encoder_subfolder,
                            low_cpu_mem_usage=low_cpu_mem_usage,
                            cache_dir=cache_dir,
                            local_files_only=local_files_only,
                        ).to(self.device, dtype=self.dtype)
                        self.register_modules(image_encoder=image_encoder)
                    else:
                        raise ValueError(
                            "`image_encoder` cannot be loaded because `pretrained_model_name_or_path_or_dict` is a state dict."
                        )
                else:
                    logger.warning(
                        "image_encoder is not loaded since `image_encoder_folder=None` passed. You will not be able to use `ip_adapter_image` when calling the pipeline with IP-Adapter."
                        "Use `ip_adapter_image_embeds` to pass pre-generated image embedding instead."
                    )

            # create feature extractor if it has not been registered to the pipeline yet
            if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is None:
                # FaceID IP-Adapters don't use an image encoder, so it may be absent; in that case, default to 224
                default_clip_size = 224
                clip_image_size = (
                    self.image_encoder.config.image_size if self.image_encoder is not None else default_clip_size
                )
                feature_extractor = CLIPImageProcessor(size=clip_image_size, crop_size=clip_image_size)
                self.register_modules(feature_extractor=feature_extractor)

        # load ip-adapter into unet
        unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
        unet._load_ip_adapter_weights(state_dicts, low_cpu_mem_usage=low_cpu_mem_usage)

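        # FaceID checkpoints also bundle LoRA weights for the UNet; load them as
        # additional adapters when the PEFT backend is available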
        extra_loras = unet._load_ip_adapter_loras(state_dicts)
        if extra_loras != {}:
            if not USE_PEFT_BACKEND:
                logger.warning("PEFT backend is required to load these weights.")
            else:
                # apply the IP Adapter Face ID LoRA weights
                peft_config = getattr(unet, "peft_config", {})
                for k, lora in extra_loras.items():
                    if f"faceid_{k}" not in peft_config:
                        self.load_lora_weights(lora, adapter_name=f"faceid_{k}")
                        self.set_adapters([f"faceid_{k}"], adapter_weights=[1.0])

    def set_ip_adapter_scale(self, scale):
        """
        Set IP-Adapter scales per transformer block. The input `scale` can be a single config or a list of configs
        for granular control over each IP-Adapter's behavior. A config can be a float or a dictionary.

        Example:

        ```py
        # To use original IP-Adapter
        scale = 1.0
        pipeline.set_ip_adapter_scale(scale)

        # To use style block only
        scale = {
            "up": {"block_0": [0.0, 1.0, 0.0]},
        }
        pipeline.set_ip_adapter_scale(scale)

        # To use style+layout blocks
        scale = {
            "down": {"block_2": [0.0, 1.0]},
            "up": {"block_0": [0.0, 1.0, 0.0]},
        }
        pipeline.set_ip_adapter_scale(scale)

        # To use style and layout from 2 reference images
        scales = [{"down": {"block_2": [0.0, 1.0]}}, {"up": {"block_0": [0.0, 1.0, 0.0]}}]
        pipeline.set_ip_adapter_scale(scales)
        ```
        """
        unet = getattr(self, self.unet_name) if not hasattr(self, "unet") else self.unet
        if not isinstance(scale, list):
            scale = [scale]
        scale_configs = _maybe_expand_lora_scales(unet, scale, default_scale=0.0)
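        # each config in `scale` is expanded from its block-level form (a float or a
        # nested dict keyed by "down"/"mid"/"up") into one scale per attention layer,
        # with unmentioned layers falling back to the default scale of 0.0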

        for attn_name, attn_processor in unet.attn_processors.items():
            if isinstance(
                attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, IPAdapterXFormersAttnProcessor)
            ):
                if len(scale_configs) != len(attn_processor.scale):
                    raise ValueError(
                        f"Cannot assign {len(scale_configs)} scale_configs to "
                        f"{len(attn_processor.scale)} IP-Adapter."
                    )
                elif len(scale_configs) == 1:
                    scale_configs = scale_configs * len(attn_processor.scale)
                for i, scale_config in enumerate(scale_configs):
                    if isinstance(scale_config, dict):
                        for k, s in scale_config.items():
                            if attn_name.startswith(k):
                                attn_processor.scale[i] = s
                    else:
                        attn_processor.scale[i] = scale_config

    def unload_ip_adapter(self):
        """
        Unloads the IP Adapter weights

        Examples:

        ```python
        >>> # Assuming `pipeline` is already loaded with the IP Adapter weights.
        >>> pipeline.unload_ip_adapter()
        >>> ...
        ```
        """
        # remove CLIP image encoder
        if hasattr(self, "image_encoder") and getattr(self, "image_encoder", None) is not None:
            self.image_encoder = None
            self.register_to_config(image_encoder=[None, None])

        # remove the feature extractor only when the pipeline has no safety_checker,
        # because the safety_checker relies on the feature_extractor later
        if not hasattr(self, "safety_checker"):
            if hasattr(self, "feature_extractor") and getattr(self, "feature_extractor", None) is not None:
                self.feature_extractor = None
                self.register_to_config(feature_extractor=[None, None])

        # remove the IP-Adapter image projection layers from the UNet
        self.unet.encoder_hid_proj = None
        self.unet.config.encoder_hid_dim_type = None

        # Kolors: restore `encoder_hid_proj` with `text_encoder_hid_proj`
        if hasattr(self.unet, "text_encoder_hid_proj") and self.unet.text_encoder_hid_proj is not None:
            self.unet.encoder_hid_proj = self.unet.text_encoder_hid_proj
            self.unet.text_encoder_hid_proj = None
            self.unet.config.encoder_hid_dim_type = "text_proj"

        # restore the original UNet attention processors
        attn_procs = {}
        for name, value in self.unet.attn_processors.items():
            attn_processor_class = (
                AttnProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnProcessor()
            )
            attn_procs[name] = (
                attn_processor_class
                if isinstance(
                    value, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0, IPAdapterXFormersAttnProcessor)
                )
                else value.__class__()
            )
        self.unet.set_attn_processor(attn_procs)