# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import os
import re
import tempfile
import unittest
from itertools import product

import numpy as np
import pytest
import torch
from parameterized import parameterized

from diffusers import (
    AutoencoderKL,
    UNet2DConditionModel,
)
from diffusers.utils import logging
from diffusers.utils.import_utils import is_peft_available

from ..testing_utils import (
    CaptureLogger,
    check_if_dicts_are_equal,
    floats_tensor,
    is_torch_version,
    require_peft_backend,
    require_peft_version_greater,
    require_torch_accelerator,
    require_transformers_version_greater,
    skip_mps,
    torch_device,
)


if is_peft_available():
    from peft import LoraConfig, inject_adapter_in_model, set_peft_model_state_dict
    from peft.tuners.tuners_utils import BaseTunerLayer
    from peft.utils import get_peft_model_state_dict


def state_dicts_almost_equal(sd1, sd2):
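    """Returns `True` if every tensor in the two state dicts matches within a 1e-3 max absolute difference."""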
    sd1 = dict(sorted(sd1.items()))
    sd2 = dict(sorted(sd2.items()))

    models_are_equal = True
    for ten1, ten2 in zip(sd1.values(), sd2.values()):
        if (ten1 - ten2).abs().max() > 1e-3:
            models_are_equal = False

    return models_are_equal


def check_if_lora_correctly_set(model) -> bool:
    """
    Checks if the LoRA layers are correctly set with peft
    """
    for module in model.modules():
        if isinstance(module, BaseTunerLayer):
            return True
    return False


def check_module_lora_metadata(parsed_metadata: dict, lora_metadatas: dict, module_key: str):
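    """Compares the metadata parsed for `module_key` against the expected `<module_key>_lora_adapter_metadata` entry."""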
    extracted = {
        k.removeprefix(f"{module_key}."): v for k, v in parsed_metadata.items() if k.startswith(f"{module_key}.")
    }
    check_if_dicts_are_equal(extracted, lora_metadatas[f"{module_key}_lora_adapter_metadata"])


def initialize_dummy_state_dict(state_dict):
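    """Replaces the meta-device tensors of `state_dict` with randomly initialized tensors on `torch_device`."""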
    if not all(v.device.type == "meta" for _, v in state_dict.items()):
        raise ValueError("`state_dict` has non-meta values.")
    return {k: torch.randn(v.shape, device=torch_device, dtype=v.dtype) for k, v in state_dict.items()}


POSSIBLE_ATTENTION_KWARGS_NAMES = ["cross_attention_kwargs", "joint_attention_kwargs", "attention_kwargs"]


def determine_attention_kwargs_name(pipeline_class):
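    """Returns the attention-kwargs argument name accepted by `pipeline_class.__call__`."""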
    call_signature_keys = inspect.signature(pipeline_class.__call__).parameters.keys()

    # TODO(diffusers): Discuss a common naming convention across library for 1.0.0 release
    attention_kwargs_name = None
    for possible_attention_kwargs in POSSIBLE_ATTENTION_KWARGS_NAMES:
        if possible_attention_kwargs in call_signature_keys:
            attention_kwargs_name = possible_attention_kwargs
            break
    assert attention_kwargs_name is not None
    return attention_kwargs_name


@require_peft_backend
class PeftLoraLoaderMixinTests:
    pipeline_class = None

    scheduler_cls = None
    scheduler_kwargs = None

    has_two_text_encoders = False
    has_three_text_encoders = False
    text_encoder_cls, text_encoder_id, text_encoder_subfolder = None, None, ""
    text_encoder_2_cls, text_encoder_2_id, text_encoder_2_subfolder = None, None, ""
    text_encoder_3_cls, text_encoder_3_id, text_encoder_3_subfolder = None, None, ""
    tokenizer_cls, tokenizer_id, tokenizer_subfolder = None, None, ""
    tokenizer_2_cls, tokenizer_2_id, tokenizer_2_subfolder = None, None, ""
    tokenizer_3_cls, tokenizer_3_id, tokenizer_3_subfolder = None, None, ""

    unet_kwargs = None
    transformer_cls = None
    transformer_kwargs = None
    vae_cls = AutoencoderKL
    vae_kwargs = None

    text_encoder_target_modules = ["q_proj", "k_proj", "v_proj", "out_proj"]
    denoiser_target_modules = ["to_q", "to_k", "to_v", "to_out.0"]

    cached_non_lora_output = None

    def get_base_pipe_output(self):
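        """Returns the LoRA-free baseline output, computing it once and caching it for subsequent calls."""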
        if self.cached_non_lora_output is None:
            self.cached_non_lora_output = self._compute_baseline_output()
        return self.cached_non_lora_output

    def get_dummy_components(self, scheduler_cls=None, use_dora=False, lora_alpha=None):
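        """Builds the dummy pipeline components plus the text-encoder and denoiser `LoraConfig`s used across the tests."""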
        if self.unet_kwargs and self.transformer_kwargs:
            raise ValueError("Both `unet_kwargs` and `transformer_kwargs` cannot be specified.")
        if self.has_two_text_encoders and self.has_three_text_encoders:
            raise ValueError("Both `has_two_text_encoders` and `has_three_text_encoders` cannot be True.")

        scheduler_cls = scheduler_cls if scheduler_cls is not None else self.scheduler_cls
        rank = 4
        lora_alpha = rank if lora_alpha is None else lora_alpha

        torch.manual_seed(0)
        if self.unet_kwargs is not None:
            unet = UNet2DConditionModel(**self.unet_kwargs)
        else:
            transformer = self.transformer_cls(**self.transformer_kwargs)

        scheduler = scheduler_cls(**self.scheduler_kwargs)

        torch.manual_seed(0)
        vae = self.vae_cls(**self.vae_kwargs)

        text_encoder = self.text_encoder_cls.from_pretrained(
            self.text_encoder_id, subfolder=self.text_encoder_subfolder
        )
        tokenizer = self.tokenizer_cls.from_pretrained(self.tokenizer_id, subfolder=self.tokenizer_subfolder)

        if self.text_encoder_2_cls is not None:
            text_encoder_2 = self.text_encoder_2_cls.from_pretrained(
                self.text_encoder_2_id, subfolder=self.text_encoder_2_subfolder
            )
            tokenizer_2 = self.tokenizer_2_cls.from_pretrained(
                self.tokenizer_2_id, subfolder=self.tokenizer_2_subfolder
            )

        if self.text_encoder_3_cls is not None:
            text_encoder_3 = self.text_encoder_3_cls.from_pretrained(
                self.text_encoder_3_id, subfolder=self.text_encoder_3_subfolder
            )
            tokenizer_3 = self.tokenizer_3_cls.from_pretrained(
                self.tokenizer_3_id, subfolder=self.tokenizer_3_subfolder
            )

        text_lora_config = LoraConfig(
            r=rank,
            lora_alpha=lora_alpha,
            target_modules=self.text_encoder_target_modules,
            init_lora_weights=False,
            use_dora=use_dora,
        )

        denoiser_lora_config = LoraConfig(
            r=rank,
            lora_alpha=lora_alpha,
            target_modules=self.denoiser_target_modules,
            init_lora_weights=False,
            use_dora=use_dora,
        )

        pipeline_components = {
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        # Denoiser
        if self.unet_kwargs is not None:
            pipeline_components.update({"unet": unet})
        elif self.transformer_kwargs is not None:
            pipeline_components.update({"transformer": transformer})

        # Remaining text encoders.
        if self.text_encoder_2_cls is not None:
            pipeline_components.update({"tokenizer_2": tokenizer_2, "text_encoder_2": text_encoder_2})
        if self.text_encoder_3_cls is not None:
            pipeline_components.update({"tokenizer_3": tokenizer_3, "text_encoder_3": text_encoder_3})

        # Remaining stuff
        init_params = inspect.signature(self.pipeline_class.__init__).parameters
        if "safety_checker" in init_params:
            pipeline_components.update({"safety_checker": None})
        if "feature_extractor" in init_params:
            pipeline_components.update({"feature_extractor": None})
        if "image_encoder" in init_params:
            pipeline_components.update({"image_encoder": None})

        return pipeline_components, text_lora_config, denoiser_lora_config

    @property
    def output_shape(self):
        raise NotImplementedError

    def get_dummy_inputs(self, with_generator=True):
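        """Returns dummy noise, input ids, and pipeline call kwargs (optionally including a seeded `generator`)."""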
        batch_size = 1
        sequence_length = 10
        num_channels = 4
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        noise = floats_tensor((batch_size, num_channels) + sizes)
        input_ids = torch.randint(1, sequence_length, size=(batch_size, sequence_length), generator=generator)

        pipeline_inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "num_inference_steps": 5,
            "guidance_scale": 6.0,
            "output_type": "np",
        }
        if with_generator:
            pipeline_inputs.update({"generator": generator})

        return noise, input_ids, pipeline_inputs

    def _compute_baseline_output(self):
        components, _, _ = self.get_dummy_components(self.scheduler_cls)
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # Always ensure the inputs are without the `generator`. Make sure to pass the `generator`
        # explicitly.
        _, _, inputs = self.get_dummy_inputs(with_generator=False)
        return pipe(**inputs, generator=torch.manual_seed(0))[0]

    def _get_lora_state_dicts(self, modules_to_save):
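        """Collects the PEFT state dict of each module under a `<module_name>_lora_layers` key for `save_lora_weights`."""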
        state_dicts = {}
        for module_name, module in modules_to_save.items():
            if module is not None:
                state_dicts[f"{module_name}_lora_layers"] = get_peft_model_state_dict(module)
        return state_dicts

    def _get_lora_adapter_metadata(self, modules_to_save):
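        """Collects the default adapter config of each module as a dict under a `<module_name>_lora_adapter_metadata` key."""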
        metadatas = {}
        for module_name, module in modules_to_save.items():
            if module is not None:
                metadatas[f"{module_name}_lora_adapter_metadata"] = module.peft_config["default"].to_dict()
        return metadatas

    def _get_modules_to_save(self, pipe, has_denoiser=False):
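        """Returns the modules whose LoRA layers should be saved: text encoders with a `peft_config` and, optionally, the denoiser."""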
        modules_to_save = {}
        lora_loadable_modules = self.pipeline_class._lora_loadable_modules

        if (
            "text_encoder" in lora_loadable_modules
            and hasattr(pipe, "text_encoder")
            and getattr(pipe.text_encoder, "peft_config", None) is not None
        ):
            modules_to_save["text_encoder"] = pipe.text_encoder

        if (
            "text_encoder_2" in lora_loadable_modules
            and hasattr(pipe, "text_encoder_2")
            and getattr(pipe.text_encoder_2, "peft_config", None) is not None
        ):
            modules_to_save["text_encoder_2"] = pipe.text_encoder_2

        if has_denoiser:
            if "unet" in lora_loadable_modules and hasattr(pipe, "unet"):
                modules_to_save["unet"] = pipe.unet

            if "transformer" in lora_loadable_modules and hasattr(pipe, "transformer"):
                modules_to_save["transformer"] = pipe.transformer

        return modules_to_save

    def add_adapters_to_pipeline(self, pipe, text_lora_config=None, denoiser_lora_config=None, adapter_name="default"):
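        """Adds LoRA adapters named `adapter_name` to the text encoder(s) and/or the denoiser and returns the pipe and denoiser."""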
        if text_lora_config is not None:
            if "text_encoder" in self.pipeline_class._lora_loadable_modules:
                pipe.text_encoder.add_adapter(text_lora_config, adapter_name=adapter_name)
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder"
                )

        if denoiser_lora_config is not None:
            denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
            denoiser.add_adapter(denoiser_lora_config, adapter_name=adapter_name)
            self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")
        else:
            denoiser = None

        if text_lora_config is not None and (self.has_two_text_encoders or self.has_three_text_encoders):
            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                pipe.text_encoder_2.add_adapter(text_lora_config, adapter_name=adapter_name)
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
                )
        return pipe, denoiser

    def test_simple_inference(self):
        """
        Tests a simple inference and makes sure it works as expected
        """
        output_no_lora = self.get_base_pipe_output()
        assert output_no_lora.shape == self.output_shape

    def test_simple_inference_with_text_lora(self):
        """
        Tests a simple inference with lora attached on the text encoder
        and makes sure it works as expected
        """
        components, text_lora_config, _ = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()
        pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)

        output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(
            not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output"
        )

    @require_peft_version_greater("0.13.1")
    def test_low_cpu_mem_usage_with_injection(self):
        """Tests if we can inject LoRA state dict with low_cpu_mem_usage."""
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            inject_adapter_in_model(text_lora_config, pipe.text_encoder, low_cpu_mem_usage=True)
            self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder.")
            self.assertTrue(
                "meta" in {p.device.type for p in pipe.text_encoder.parameters()},
                "The LoRA params should be on 'meta' device.",
            )

            te_state_dict = initialize_dummy_state_dict(get_peft_model_state_dict(pipe.text_encoder))
            set_peft_model_state_dict(pipe.text_encoder, te_state_dict, low_cpu_mem_usage=True)
            self.assertTrue(
                "meta" not in {p.device.type for p in pipe.text_encoder.parameters()},
                "No param should be on 'meta' device.",
            )

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        inject_adapter_in_model(denoiser_lora_config, denoiser, low_cpu_mem_usage=True)
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")
        self.assertTrue(
            "meta" in {p.device.type for p in denoiser.parameters()}, "The LoRA params should be on 'meta' device."
        )

        denoiser_state_dict = initialize_dummy_state_dict(get_peft_model_state_dict(denoiser))
        set_peft_model_state_dict(denoiser, denoiser_state_dict, low_cpu_mem_usage=True)
        self.assertTrue(
            "meta" not in {p.device.type for p in denoiser.parameters()}, "No param should be on 'meta' device."
        )

        if self.has_two_text_encoders or self.has_three_text_encoders:
            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                inject_adapter_in_model(text_lora_config, pipe.text_encoder_2, low_cpu_mem_usage=True)
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
                )
                self.assertTrue(
                    "meta" in {p.device.type for p in pipe.text_encoder_2.parameters()},
                    "The LoRA params should be on 'meta' device.",
                )

                te2_state_dict = initialize_dummy_state_dict(get_peft_model_state_dict(pipe.text_encoder_2))
                set_peft_model_state_dict(pipe.text_encoder_2, te2_state_dict, low_cpu_mem_usage=True)
                self.assertTrue(
                    "meta" not in {p.device.type for p in pipe.text_encoder_2.parameters()},
                    "No param should be on 'meta' device.",
                )

        _, _, inputs = self.get_dummy_inputs()
        output_lora = pipe(**inputs)[0]
        self.assertTrue(output_lora.shape == self.output_shape)

    @require_peft_version_greater("0.13.1")
    @require_transformers_version_greater("4.45.2")
    def test_low_cpu_mem_usage_with_loading(self):
        """Tests if we can load LoRA state dict with low_cpu_mem_usage."""
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)

        images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
            lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
            self.pipeline_class.save_lora_weights(
                save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts
            )

            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
            pipe.unload_lora_weights()
            pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin"), low_cpu_mem_usage=False)

            for module_name, module in modules_to_save.items():
                self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}")

            images_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0]
            self.assertTrue(
                np.allclose(images_lora, images_lora_from_pretrained, atol=1e-3, rtol=1e-3),
                "Loading from saved checkpoints should give same results.",
            )

            # Now, check for `low_cpu_mem_usage.`
            pipe.unload_lora_weights()
            pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin"), low_cpu_mem_usage=True)

            for module_name, module in modules_to_save.items():
                self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}")

            images_lora_from_pretrained_low_cpu = pipe(**inputs, generator=torch.manual_seed(0))[0]
            self.assertTrue(
                np.allclose(images_lora_from_pretrained_low_cpu, images_lora_from_pretrained, atol=1e-3, rtol=1e-3),
                "Loading from saved checkpoints with `low_cpu_mem_usage` should give same results.",
            )

    def test_simple_inference_with_text_lora_and_scale(self):
        """
        Tests a simple inference with lora attached on the text encoder + scale argument
        and makes sure it works as expected
        """
        attention_kwargs_name = determine_attention_kwargs_name(self.pipeline_class)
        components, text_lora_config, _ = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()

        pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)

        output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(
            not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output"
        )

        attention_kwargs = {attention_kwargs_name: {"scale": 0.5}}
        output_lora_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0]

        self.assertTrue(
            not np.allclose(output_lora, output_lora_scale, atol=1e-3, rtol=1e-3),
            "Lora + scale should change the output",
        )

        attention_kwargs = {attention_kwargs_name: {"scale": 0.0}}
        output_lora_0_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0]

        self.assertTrue(
            np.allclose(output_no_lora, output_lora_0_scale, atol=1e-3, rtol=1e-3),
            "Lora + 0 scale should lead to same result as no LoRA",
        )

    def test_simple_inference_with_text_lora_fused(self):
        """
        Tests a simple inference with lora attached to the text encoder, fuses the lora weights into the base model,
        and makes sure it works as expected
        """
        components, text_lora_config, _ = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()

        pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)

        pipe.fuse_lora()
        # Fusing should still keep the LoRA layers
        self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
                )

        output_fused = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertFalse(
            np.allclose(output_fused, output_no_lora, atol=1e-3, rtol=1e-3), "Fused lora should change the output"
        )

    def test_simple_inference_with_text_lora_unloaded(self):
        """
        Tests a simple inference with lora attached to text encoder, then unloads the lora weights
        and makes sure it works as expected
        """
        components, text_lora_config, _ = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()

        pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)

        pipe.unload_lora_weights()
        # unloading should remove the LoRA layers
        self.assertFalse(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly unloaded in text encoder")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                self.assertFalse(
                    check_if_lora_correctly_set(pipe.text_encoder_2),
                    "Lora not correctly unloaded in text encoder 2",
                )

        output_unloaded = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(
            np.allclose(output_unloaded, output_no_lora, atol=1e-3, rtol=1e-3),
            "Unloading the lora should give the same output as no lora",
        )

    def test_simple_inference_with_text_lora_save_load(self):
        """
        Tests a simple use case where users could use saving utilities for LoRA.
        """
        components, text_lora_config, _ = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)

        images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            modules_to_save = self._get_modules_to_save(pipe)
            lora_state_dicts = self._get_lora_state_dicts(modules_to_save)

            self.pipeline_class.save_lora_weights(
                save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts
            )

            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
            pipe.unload_lora_weights()
            pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))

        for module_name, module in modules_to_save.items():
            self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}")

        images_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertTrue(
            np.allclose(images_lora, images_lora_from_pretrained, atol=1e-3, rtol=1e-3),
            "Loading from saved checkpoints should give same results.",
        )

    def test_simple_inference_with_partial_text_lora(self):
        """
        Tests a simple inference with lora attached on the text encoder
        with different ranks and some adapters removed
        and makes sure it works as expected
        """
        components, _, _ = self.get_dummy_components()
        # Verify `StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder` handles different ranks per module (PR#8324).
        text_lora_config = LoraConfig(
            r=4,
            rank_pattern={self.text_encoder_target_modules[i]: i + 1 for i in range(3)},
            lora_alpha=4,
            target_modules=self.text_encoder_target_modules,
            init_lora_weights=False,
            use_dora=False,
        )
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()

        pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)

        state_dict = {}
        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            # Gather the state dict for the PEFT model, excluding `layers.4`, to ensure `load_lora_into_text_encoder`
            # supports missing layers (PR#8324).
            state_dict = {
                f"text_encoder.{module_name}": param
                for module_name, param in get_peft_model_state_dict(pipe.text_encoder).items()
                if "text_model.encoder.layers.4" not in module_name
            }

        if self.has_two_text_encoders or self.has_three_text_encoders:
            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                state_dict.update(
                    {
                        f"text_encoder_2.{module_name}": param
                        for module_name, param in get_peft_model_state_dict(pipe.text_encoder_2).items()
                        if "text_model.encoder.layers.4" not in module_name
                    }
                )

        output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(
            not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output"
        )

        # Unload lora and load it back using the pipe.load_lora_weights machinery
        pipe.unload_lora_weights()
        pipe.load_lora_weights(state_dict)

        output_partial_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(
            not np.allclose(output_partial_lora, output_lora, atol=1e-3, rtol=1e-3),
            "Removing adapters should change the output",
        )

    def test_simple_inference_save_pretrained_with_text_lora(self):
        """
        Tests a simple use case where users could use saving utilities for LoRA through save_pretrained
        """
        components, text_lora_config, _ = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config=None)
        images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)

            pipe_from_pretrained = self.pipeline_class.from_pretrained(tmpdirname)
            pipe_from_pretrained.to(torch_device)

        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            self.assertTrue(
                check_if_lora_correctly_set(pipe_from_pretrained.text_encoder),
                "Lora not correctly set in text encoder",
            )

        if self.has_two_text_encoders or self.has_three_text_encoders:
            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                self.assertTrue(
                    check_if_lora_correctly_set(pipe_from_pretrained.text_encoder_2),
                    "Lora not correctly set in text encoder 2",
                )

        images_lora_save_pretrained = pipe_from_pretrained(**inputs, generator=torch.manual_seed(0))[0]

        self.assertTrue(
            np.allclose(images_lora, images_lora_save_pretrained, atol=1e-3, rtol=1e-3),
            "Loading from saved checkpoints should give same results.",
        )

    def test_simple_inference_with_text_denoiser_lora_save_load(self):
        """
        Tests a simple use case where users could use saving utilities for LoRA for the UNet + text encoder
        """
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)

        images_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
            lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
            self.pipeline_class.save_lora_weights(
                save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts
            )

            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
            pipe.unload_lora_weights()
            pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.bin"))

        for module_name, module in modules_to_save.items():
            self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}")

        images_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(
            np.allclose(images_lora, images_lora_from_pretrained, atol=1e-3, rtol=1e-3),
            "Loading from saved checkpoints should give same results.",
        )

    def test_simple_inference_with_text_denoiser_lora_and_scale(self):
        """
        Tests a simple inference with lora attached on the text encoder + Unet + scale argument
        and makes sure it works as expected
        """
        attention_kwargs_name = determine_attention_kwargs_name(self.pipeline_class)
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()
        pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)

        output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(
            not np.allclose(output_lora, output_no_lora, atol=1e-3, rtol=1e-3), "Lora should change the output"
        )

        attention_kwargs = {attention_kwargs_name: {"scale": 0.5}}
        output_lora_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0]

        self.assertTrue(
            not np.allclose(output_lora, output_lora_scale, atol=1e-3, rtol=1e-3),
            "Lora + scale should change the output",
        )

        attention_kwargs = {attention_kwargs_name: {"scale": 0.0}}
        output_lora_0_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0]

        self.assertTrue(
            np.allclose(output_no_lora, output_lora_0_scale, atol=1e-3, rtol=1e-3),
            "Lora + 0 scale should lead to same result as no LoRA",
        )

        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            self.assertTrue(
                pipe.text_encoder.text_model.encoder.layers[0].self_attn.q_proj.scaling["default"] == 1.0,
                "The scaling parameter has not been correctly restored!",
            )

    def test_simple_inference_with_text_lora_denoiser_fused(self):
        """
        Tests a simple inference with lora attached to the text encoder and denoiser, fuses the lora weights into
        the base model and makes sure it works as expected
        """
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()

        pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)

        pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules)

        # Fusing should still keep the LoRA layers
        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")

        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
                )

        output_fused = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertFalse(
            np.allclose(output_fused, output_no_lora, atol=1e-3, rtol=1e-3), "Fused lora should change the output"
        )

    def test_simple_inference_with_text_denoiser_lora_unloaded(self):
        """
        Tests a simple inference with lora attached to text encoder and unet, then unloads the lora weights
        and makes sure it works as expected
        """
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()

        pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)

        pipe.unload_lora_weights()
        # unloading should remove the LoRA layers
        self.assertFalse(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly unloaded in text encoder")
        self.assertFalse(check_if_lora_correctly_set(denoiser), "Lora not correctly unloaded in denoiser")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                self.assertFalse(
                    check_if_lora_correctly_set(pipe.text_encoder_2),
                    "Lora not correctly unloaded in text encoder 2",
                )

        output_unloaded = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(
            np.allclose(output_unloaded, output_no_lora, atol=1e-3, rtol=1e-3),
            "Fused lora should change the output",
        )

    def test_simple_inference_with_text_denoiser_lora_unfused(
        self, expected_atol: float = 1e-3, expected_rtol: float = 1e-3
    ):
        """
        Tests a simple inference with lora attached to text encoder and unet, then fuses and unfuses the lora weights
        and makes sure the fused and unfused outputs match
        """
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)

        pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules)
        self.assertTrue(pipe.num_fused_loras == 1, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}")
        output_fused_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

        pipe.unfuse_lora(components=self.pipeline_class._lora_loadable_modules)
        self.assertTrue(pipe.num_fused_loras == 0, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}")
        output_unfused_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

        # Unfusing should still keep the LoRA layers
        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Unfuse should still keep LoRA layers")

        self.assertTrue(check_if_lora_correctly_set(denoiser), "Unfuse should still keep LoRA layers")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder_2), "Unfuse should still keep LoRA layers"
                )

        # Fuse and unfuse should lead to the same results
        self.assertTrue(
            np.allclose(output_fused_lora, output_unfused_lora, atol=expected_atol, rtol=expected_rtol),
            "Fused lora should not change the output",
        )

    def test_simple_inference_with_text_denoiser_multi_adapter(self):
        """
        Tests a simple inference with lora attached to text encoder and unet, attaches
        multiple adapters and sets them
        """
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()

        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
            self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config, "adapter-1")
        denoiser.add_adapter(denoiser_lora_config, "adapter-2")
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
                pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2")
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
                )

        pipe.set_adapters("adapter-1")
        output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertFalse(
            np.allclose(output_no_lora, output_adapter_1, atol=1e-3, rtol=1e-3),
            "Adapter outputs should be different.",
        )

        pipe.set_adapters("adapter-2")
        output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertFalse(
            np.allclose(output_no_lora, output_adapter_2, atol=1e-3, rtol=1e-3),
            "Adapter outputs should be different.",
        )

        pipe.set_adapters(["adapter-1", "adapter-2"])
        output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertFalse(
            np.allclose(output_no_lora, output_adapter_mixed, atol=1e-3, rtol=1e-3),
            "Adapter outputs should be different.",
        )

        # Different adapters (and adapter combinations) should give different results
        self.assertFalse(
            np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
            "Adapter 1 and 2 should give different results",
        )

        self.assertFalse(
            np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3),
            "Adapter 1 and mixed adapters should give different results",
        )

        self.assertFalse(
            np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3),
            "Adapter 2 and mixed adapters should give different results",
        )

        pipe.disable_lora()
        output_disabled = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertTrue(
            np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3),
            "output with no lora and output with lora disabled should give same results",
        )

    def test_wrong_adapter_name_raises_error(self):
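        """Tests that `set_adapters` raises a `ValueError` when given an adapter name that was never added."""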
        adapter_name = "adapter-1"

        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        pipe, _ = self.add_adapters_to_pipeline(
            pipe, text_lora_config, denoiser_lora_config, adapter_name=adapter_name
        )

        with self.assertRaises(ValueError) as err_context:
            pipe.set_adapters("test")

        self.assertTrue("not in the list of present adapters" in str(err_context.exception))

        # test this works.
        pipe.set_adapters(adapter_name)
        _ = pipe(**inputs, generator=torch.manual_seed(0))[0]

    def test_multiple_wrong_adapter_name_raises_error(self):
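        """Tests that `set_adapters` logs a warning when `adapter_weights` references components not in the pipeline."""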
        adapter_name = "adapter-1"
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        pipe, _ = self.add_adapters_to_pipeline(
            pipe, text_lora_config, denoiser_lora_config, adapter_name=adapter_name
        )

        scale_with_wrong_components = {"foo": 0.0, "bar": 0.0, "tik": 0.0}
        logger = logging.get_logger("diffusers.loaders.lora_base")
        logger.setLevel(30)
        with CaptureLogger(logger) as cap_logger:
            pipe.set_adapters(adapter_name, adapter_weights=scale_with_wrong_components)

        wrong_components = sorted(set(scale_with_wrong_components.keys()))
        msg = f"The following components in `adapter_weights` are not part of the pipeline: {wrong_components}. "
        self.assertTrue(msg in str(cap_logger.out))

        # test this works.
        pipe.set_adapters(adapter_name)
        _ = pipe(**inputs, generator=torch.manual_seed(0))[0]

    def test_simple_inference_with_text_denoiser_block_scale(self):
        """
        Tests a simple inference with lora attached to text encoder and unet, attaches
        one adapter and sets different weights for different blocks (i.e. block lora)
        """
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()

        pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
        self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config)
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
                )

        weights_1 = {"text_encoder": 2, "unet": {"down": 5}}
        pipe.set_adapters("adapter-1", weights_1)
        output_weights_1 = pipe(**inputs, generator=torch.manual_seed(0))[0]

        weights_2 = {"unet": {"up": 5}}
        pipe.set_adapters("adapter-1", weights_2)
        output_weights_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertFalse(
            np.allclose(output_weights_1, output_weights_2, atol=1e-3, rtol=1e-3),
            "LoRA weights 1 and 2 should give different results",
        )
        self.assertFalse(
            np.allclose(output_no_lora, output_weights_1, atol=1e-3, rtol=1e-3),
            "No adapter and LoRA weights 1 should give different results",
        )
        self.assertFalse(
            np.allclose(output_no_lora, output_weights_2, atol=1e-3, rtol=1e-3),
            "No adapter and LoRA weights 2 should give different results",
        )

        pipe.disable_lora()
        output_disabled = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertTrue(
            np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3),
            "output with no lora and output with lora disabled should give same results",
        )

    def test_simple_inference_with_text_denoiser_multi_adapter_block_lora(self):
        """
        Tests a simple inference with lora attached to text encoder and unet, attaches
        multiple adapters and sets different weights for different blocks (i.e. block lora)
        """
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()

        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
            self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config, "adapter-1")
        denoiser.add_adapter(denoiser_lora_config, "adapter-2")
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
                pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2")
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
                )

        scales_1 = {"text_encoder": 2, "unet": {"down": 5}}
        scales_2 = {"unet": {"down": 5, "mid": 5}}

        pipe.set_adapters("adapter-1", scales_1)
        output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0]

        pipe.set_adapters("adapter-2", scales_2)
        output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]

        pipe.set_adapters(["adapter-1", "adapter-2"], [scales_1, scales_2])
        output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0]

        # Different adapters (and adapter combinations) should give different results
        self.assertFalse(
            np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
            "Adapter 1 and 2 should give different results",
        )

        self.assertFalse(
            np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3),
            "Adapter 1 and mixed adapters should give different results",
        )

        self.assertFalse(
            np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3),
            "Adapter 2 and mixed adapters should give different results",
        )

        pipe.disable_lora()
        output_disabled = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertTrue(
            np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3),
            "output with no lora and output with lora disabled should give same results",
        )

        # a mismatching number of adapter_names and adapter_weights should raise an error
        with self.assertRaises(ValueError):
            pipe.set_adapters(["adapter-1", "adapter-2"], [scales_1])

    def test_simple_inference_with_text_denoiser_block_scale_for_all_dict_options(self):
        """Tests that any valid combination of lora block scales can be used in pipe.set_adapter"""

        def updown_options(blocks_with_tf, layers_per_block, value):
            """
            Generate every possible shape the up/down part of a lora weight dict can take.
            E.g. 2, {"block_1": 2}, {"block_1": [2,2,2]}, {"block_1": 2, "block_2": [2,2,2]}, ...
            """
            num_val = value
            list_val = [value] * layers_per_block

            node_opts = [None, num_val, list_val]
            node_opts_foreach_block = [node_opts] * len(blocks_with_tf)

            updown_opts = [num_val]
            for nodes in product(*node_opts_foreach_block):
                if all(n is None for n in nodes):
                    continue
                opt = {}
                for b, n in zip(blocks_with_tf, nodes):
                    if n is not None:
                        opt["block_" + str(b)] = n
                updown_opts.append(opt)
            return updown_opts

        def all_possible_dict_opts(unet, value):
            """
            Generate every possible shape a lora weight dict can take.
            E.g. 2, {"unet": {"down": 2}}, {"unet": {"down": [2,2,2]}}, {"unet": {"mid": 2, "up": [2,2,2]}}, ...
            """

            down_blocks_with_tf = [i for i, d in enumerate(unet.down_blocks) if hasattr(d, "attentions")]
            up_blocks_with_tf = [i for i, u in enumerate(unet.up_blocks) if hasattr(u, "attentions")]

            layers_per_block = unet.config.layers_per_block

            text_encoder_opts = [None, value]
            text_encoder_2_opts = [None, value]
            mid_opts = [None, value]
            down_opts = [None] + updown_options(down_blocks_with_tf, layers_per_block, value)
            up_opts = [None] + updown_options(up_blocks_with_tf, layers_per_block + 1, value)

            opts = []

            for t1, t2, d, m, u in product(text_encoder_opts, text_encoder_2_opts, down_opts, mid_opts, up_opts):
                if all(o is None for o in (t1, t2, d, m, u)):
                    continue
                opt = {}
                if t1 is not None:
                    opt["text_encoder"] = t1
                if t2 is not None:
                    opt["text_encoder_2"] = t2
                if all(o is None for o in (d, m, u)):
                    # no unet scaling
                    continue
                opt["unet"] = {}
                if d is not None:
                    opt["unet"]["down"] = d
                if m is not None:
                    opt["unet"]["mid"] = m
                if u is not None:
                    opt["unet"]["up"] = u
                opts.append(opt)

            return opts

        components, text_lora_config, denoiser_lora_config = self.get_dummy_components(self.scheduler_cls)
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config, "adapter-1")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            lora_loadable_components = self.pipeline_class._lora_loadable_modules
            if "text_encoder_2" in lora_loadable_components:
                pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")

        for scale_dict in all_possible_dict_opts(pipe.unet, value=1234):
            # test if lora block scales can be set with this scale_dict
            if not self.has_two_text_encoders and "text_encoder_2" in scale_dict:
                del scale_dict["text_encoder_2"]

            pipe.set_adapters("adapter-1", scale_dict)  # test will fail if this line throws an error

    def test_simple_inference_with_text_denoiser_multi_adapter_delete_adapter(self):
        """
        Tests a simple inference with lora attached to text encoder and unet, attaches
        multiple adapters and set/delete them
        """
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()

        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
            self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config, "adapter-1")
        denoiser.add_adapter(denoiser_lora_config, "adapter-2")
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            lora_loadable_components = self.pipeline_class._lora_loadable_modules
            if "text_encoder_2" in lora_loadable_components:
                pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
                pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2")
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
                )

        pipe.set_adapters("adapter-1")
        output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0]

        pipe.set_adapters("adapter-2")
        output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]

        pipe.set_adapters(["adapter-1", "adapter-2"])
        output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertFalse(
            np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
            "Adapter 1 and 2 should give different results",
        )

        self.assertFalse(
            np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3),
            "Adapter 1 and mixed adapters should give different results",
        )

        self.assertFalse(
            np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3),
            "Adapter 2 and mixed adapters should give different results",
        )

        pipe.delete_adapters("adapter-1")
        output_deleted_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertTrue(
            np.allclose(output_deleted_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
            "Deleting adapter 1 should leave only adapter 2 active, matching its standalone output",
        )

        pipe.delete_adapters("adapter-2")
        output_deleted_adapters = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertTrue(
            np.allclose(output_no_lora, output_deleted_adapters, atol=1e-3, rtol=1e-3),
            "output with no lora and output with all adapters deleted should give same results",
        )

        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config, "adapter-1")
        denoiser.add_adapter(denoiser_lora_config, "adapter-2")
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        pipe.set_adapters(["adapter-1", "adapter-2"])
        pipe.delete_adapters(["adapter-1", "adapter-2"])

        output_deleted_adapters = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertTrue(
            np.allclose(output_no_lora, output_deleted_adapters, atol=1e-3, rtol=1e-3),
            "output with no lora and output with all adapters deleted should give same results",
        )

    def test_simple_inference_with_text_denoiser_multi_adapter_weighted(self):
        """
        Tests a simple inference with lora attached to text encoder and unet, attaches
        multiple adapters and set them
        """
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()

        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
            self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config, "adapter-1")
        denoiser.add_adapter(denoiser_lora_config, "adapter-2")
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            lora_loadable_components = self.pipeline_class._lora_loadable_modules
            if "text_encoder_2" in lora_loadable_components:
                pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
                pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2")
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
                )

        pipe.set_adapters("adapter-1")
        output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0]

        pipe.set_adapters("adapter-2")
        output_adapter_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]

        pipe.set_adapters(["adapter-1", "adapter-2"])
        output_adapter_mixed = pipe(**inputs, generator=torch.manual_seed(0))[0]

        # Different adapters (and their combination) should produce different outputs
        self.assertFalse(
            np.allclose(output_adapter_1, output_adapter_2, atol=1e-3, rtol=1e-3),
            "Adapter 1 and 2 should give different results",
        )

        self.assertFalse(
            np.allclose(output_adapter_1, output_adapter_mixed, atol=1e-3, rtol=1e-3),
            "Adapter 1 and mixed adapters should give different results",
        )

        self.assertFalse(
            np.allclose(output_adapter_2, output_adapter_mixed, atol=1e-3, rtol=1e-3),
            "Adapter 2 and mixed adapters should give different results",
        )

        pipe.set_adapters(["adapter-1", "adapter-2"], [0.5, 0.6])
        output_adapter_mixed_weighted = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertFalse(
            np.allclose(output_adapter_mixed_weighted, output_adapter_mixed, atol=1e-3, rtol=1e-3),
            "Weighted adapter and mixed adapter should give different results",
        )

        pipe.disable_lora()
        output_disabled = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertTrue(
            np.allclose(output_no_lora, output_disabled, atol=1e-3, rtol=1e-3),
            "output with no lora and output with lora disabled should give same results",
        )

    @skip_mps
    @pytest.mark.xfail(
        condition=torch.device(torch_device).type == "cpu" and is_torch_version(">=", "2.5"),
        reason="Test currently fails on CPU and PyTorch 2.5.1 but not on PyTorch 2.4.1.",
        strict=False,
    )
    def test_lora_fuse_nan(self):
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
            self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config, "adapter-1")
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        # corrupt one LoRA weight with `inf` values
        with torch.no_grad():
            if self.unet_kwargs:
                pipe.unet.mid_block.attentions[0].transformer_blocks[0].attn1.to_q.lora_A["adapter-1"].weight += float(
                    "inf"
                )
            else:
                named_modules = [name for name, _ in pipe.transformer.named_modules()]
                possible_tower_names = [
                    "transformer_blocks",
                    "blocks",
                    "joint_transformer_blocks",
                    "single_transformer_blocks",
                ]
                filtered_tower_names = [
                    tower_name for tower_name in possible_tower_names if hasattr(pipe.transformer, tower_name)
                ]
                if len(filtered_tower_names) == 0:
                    reason = f"`pipe.transformer` didn't have any of the following attributes: {possible_tower_names}."
                    raise ValueError(reason)
                for tower_name in filtered_tower_names:
                    transformer_tower = getattr(pipe.transformer, tower_name)
                    has_attn1 = any("attn1" in name for name in named_modules)
                    if has_attn1:
                        transformer_tower[0].attn1.to_q.lora_A["adapter-1"].weight += float("inf")
                    else:
                        transformer_tower[0].attn.to_q.lora_A["adapter-1"].weight += float("inf")

        # with `safe_fusing=True` we should see an Error
        with self.assertRaises(ValueError):
            pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=True)

        # without `safe_fusing` we should not see an error, but the output will be full of NaNs
        pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, safe_fusing=False)
        out = pipe(**inputs)[0]

        self.assertTrue(np.isnan(out).all())

    def test_get_adapters(self):
        """
        Tests a simple usecase where we attach multiple adapters and check if the results
        are the expected results
        """
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config, "adapter-1")

        adapter_names = pipe.get_active_adapters()
        self.assertListEqual(adapter_names, ["adapter-1"])

        pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
        denoiser.add_adapter(denoiser_lora_config, "adapter-2")

        adapter_names = pipe.get_active_adapters()
        self.assertListEqual(adapter_names, ["adapter-2"])

        pipe.set_adapters(["adapter-1", "adapter-2"])
        self.assertListEqual(pipe.get_active_adapters(), ["adapter-1", "adapter-2"])

    def test_get_list_adapters(self):
        """
        Tests a simple usecase where we attach multiple adapters and check if the results
        are the expected results
        """
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # 1. Add `adapter-1` to every LoRA-loadable component.
        dicts_to_be_checked = {}
        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
            dicts_to_be_checked = {"text_encoder": ["adapter-1"]}

        if self.unet_kwargs is not None:
            pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
            dicts_to_be_checked.update({"unet": ["adapter-1"]})
        else:
            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
            dicts_to_be_checked.update({"transformer": ["adapter-1"]})

        self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked)

        # 2. Add a second adapter, `adapter-2`, to the same components.
        dicts_to_be_checked = {}
        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")
            dicts_to_be_checked = {"text_encoder": ["adapter-1", "adapter-2"]}

        if self.unet_kwargs is not None:
            pipe.unet.add_adapter(denoiser_lora_config, "adapter-2")
            dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2"]})
        else:
            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-2")
            dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2"]})

        self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked)

        # 3. Activating both adapters should not change the listing.
        pipe.set_adapters(["adapter-1", "adapter-2"])

        dicts_to_be_checked = {}
        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            dicts_to_be_checked = {"text_encoder": ["adapter-1", "adapter-2"]}

        if self.unet_kwargs is not None:
            dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2"]})
        else:
            dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2"]})

        self.assertDictEqual(
            pipe.get_list_adapters(),
            dicts_to_be_checked,
        )

        # 4. Add `adapter-3` to the denoiser only.
        dicts_to_be_checked = {}
        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            dicts_to_be_checked = {"text_encoder": ["adapter-1", "adapter-2"]}

        if self.unet_kwargs is not None:
            pipe.unet.add_adapter(denoiser_lora_config, "adapter-3")
            dicts_to_be_checked.update({"unet": ["adapter-1", "adapter-2", "adapter-3"]})
        else:
            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-3")
            dicts_to_be_checked.update({"transformer": ["adapter-1", "adapter-2", "adapter-3"]})

        self.assertDictEqual(pipe.get_list_adapters(), dicts_to_be_checked)

    def test_simple_inference_with_text_lora_denoiser_fused_multi(
        self, expected_atol: float = 1e-3, expected_rtol: float = 1e-3
    ):
        """
        Tests a simple inference with lora attached into text encoder + fuses the lora weights into base model
        and makes sure it works as expected - with unet and multi-adapter case
        """
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
            self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")
            pipe.text_encoder.add_adapter(text_lora_config, "adapter-2")

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config, "adapter-1")
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")
        denoiser.add_adapter(denoiser_lora_config, "adapter-2")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            lora_loadable_components = self.pipeline_class._lora_loadable_modules
            if "text_encoder_2" in lora_loadable_components:
                pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
                )
                pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-2")

        # set them to multi-adapter inference mode
        pipe.set_adapters(["adapter-1", "adapter-2"])
        outputs_all_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

        pipe.set_adapters(["adapter-1"])
        outputs_lora_1 = pipe(**inputs, generator=torch.manual_seed(0))[0]

        pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, adapter_names=["adapter-1"])
        self.assertTrue(pipe.num_fused_loras == 1, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}")

        # Fusing should still keep the LoRA layers so output should remain the same
        outputs_lora_1_fused = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertTrue(
            np.allclose(outputs_lora_1, outputs_lora_1_fused, atol=expected_atol, rtol=expected_rtol),
            "Fused lora should not change the output",
        )

        pipe.unfuse_lora(components=self.pipeline_class._lora_loadable_modules)
        self.assertTrue(pipe.num_fused_loras == 0, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}")

        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Unfuse should still keep LoRA layers")

        self.assertTrue(check_if_lora_correctly_set(denoiser), "Unfuse should still keep LoRA layers")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder_2), "Unfuse should still keep LoRA layers"
                )

        pipe.fuse_lora(components=self.pipeline_class._lora_loadable_modules, adapter_names=["adapter-2", "adapter-1"])
        self.assertTrue(pipe.num_fused_loras == 2, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}")

        # Fusing should still keep the LoRA layers
        output_all_lora_fused = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(
            np.allclose(output_all_lora_fused, outputs_all_lora, atol=expected_atol, rtol=expected_rtol),
            "Fused lora should not change the output",
        )
        pipe.unfuse_lora(components=self.pipeline_class._lora_loadable_modules)
        self.assertTrue(pipe.num_fused_loras == 0, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}")

    def test_lora_scale_kwargs_match_fusion(self, expected_atol: float = 1e-3, expected_rtol: float = 1e-3):
        attention_kwargs_name = determine_attention_kwargs_name(self.pipeline_class)

        for lora_scale in [1.0, 0.8]:
            components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
            pipe = self.pipeline_class(**components)
            pipe = pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
            _, _, inputs = self.get_dummy_inputs(with_generator=False)

            output_no_lora = self.get_base_pipe_output()

            if "text_encoder" in self.pipeline_class._lora_loadable_modules:
                pipe.text_encoder.add_adapter(text_lora_config, "adapter-1")
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder"
                )

            denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
            denoiser.add_adapter(denoiser_lora_config, "adapter-1")
            self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

            if self.has_two_text_encoders or self.has_three_text_encoders:
                lora_loadable_components = self.pipeline_class._lora_loadable_modules
                if "text_encoder_2" in lora_loadable_components:
                    pipe.text_encoder_2.add_adapter(text_lora_config, "adapter-1")
                    self.assertTrue(
                        check_if_lora_correctly_set(pipe.text_encoder_2),
                        "Lora not correctly set in text encoder 2",
                    )

            pipe.set_adapters(["adapter-1"])
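            # Depending on the pipeline, `attention_kwargs_name` resolves to e.g. "cross_attention_kwargs"
            # or "joint_attention_kwargs", so this builds something like {"cross_attention_kwargs": {"scale": 0.8}}.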
            attention_kwargs = {attention_kwargs_name: {"scale": lora_scale}}
            outputs_lora_1 = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0]

            pipe.fuse_lora(
                components=self.pipeline_class._lora_loadable_modules,
                adapter_names=["adapter-1"],
                lora_scale=lora_scale,
            )
            self.assertTrue(pipe.num_fused_loras == 1, f"{pipe.num_fused_loras=}, {pipe.fused_loras=}")

            outputs_lora_1_fused = pipe(**inputs, generator=torch.manual_seed(0))[0]

            self.assertTrue(
                np.allclose(outputs_lora_1, outputs_lora_1_fused, atol=expected_atol, rtol=expected_rtol),
                "Fused lora should not change the output",
            )
            self.assertFalse(
                np.allclose(output_no_lora, outputs_lora_1, atol=expected_atol, rtol=expected_rtol),
                "LoRA should change the output",
            )

    def test_simple_inference_with_dora(self):
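        # DoRA (weight-decomposed low-rank adaptation) splits the LoRA update into a magnitude and a
        # direction component; here we only check that enabling it actually changes the output.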
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components(use_dora=True)
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_dora_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(output_no_dora_lora.shape == self.output_shape)
        pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)

        output_dora_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertFalse(
            np.allclose(output_dora_lora, output_no_dora_lora, atol=1e-3, rtol=1e-3),
            "DoRA lora should change the output",
        )

    def test_missing_keys_warning(self):
        # Skip text encoder check for now as that is handled with `transformers`.
        components, _, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config)
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        with tempfile.TemporaryDirectory() as tmpdirname:
            modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
            lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
            self.pipeline_class.save_lora_weights(
                save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts
            )
            pipe.unload_lora_weights()
            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
            state_dict = torch.load(os.path.join(tmpdirname, "pytorch_lora_weights.bin"), weights_only=True)

        # Pick a key dynamically since no single key works across all the models for which we
        # offer PEFT support.
        missing_key = [k for k in state_dict if "lora_A" in k][0]
        del state_dict[missing_key]

        logger = logging.get_logger("diffusers.utils.peft_utils")
        logger.setLevel(logging.WARNING)
        with CaptureLogger(logger) as cap_logger:
            pipe.load_lora_weights(state_dict)

        # The logged key won't contain the adapter name ("default_0"), so strip it from the captured output.
        # Also strip out the component prefix (such as "unet.") from `missing_key`.
        component = list({k.split(".")[0] for k in state_dict})[0]
        self.assertTrue(missing_key.replace(f"{component}.", "") in cap_logger.out.replace("default_0.", ""))

    def test_unexpected_keys_warning(self):
        # Skip text encoder check for now as that is handled with `transformers`.
        components, _, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config)
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        with tempfile.TemporaryDirectory() as tmpdirname:
            modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
            lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
            self.pipeline_class.save_lora_weights(
                save_directory=tmpdirname, safe_serialization=False, **lora_state_dicts
            )
            pipe.unload_lora_weights()
            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.bin")))
            state_dict = torch.load(os.path.join(tmpdirname, "pytorch_lora_weights.bin"), weights_only=True)

        unexpected_key = [k for k in state_dict if "lora_A" in k][0] + ".diffusers_cat"
        state_dict[unexpected_key] = torch.tensor(1.0, device=torch_device)
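        # The appended ".diffusers_cat" suffix cannot match any module in the model, so loading below
        # should warn about it as an unexpected key.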

        logger = logging.get_logger("diffusers.utils.peft_utils")
        logger.setLevel(logging.WARNING)
        with CaptureLogger(logger) as cap_logger:
            pipe.load_lora_weights(state_dict)

        self.assertTrue(".diffusers_cat" in cap_logger.out)

    @unittest.skip("This is failing for now - need to investigate")
    def test_simple_inference_with_text_denoiser_lora_unfused_torch_compile(self):
        """
        Tests a simple inference with lora attached to text encoder and unet, then unloads the lora weights
        and makes sure it works as expected
        """
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)
        pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)

        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
        pipe.text_encoder = torch.compile(pipe.text_encoder, mode="reduce-overhead", fullgraph=True)

        if self.has_two_text_encoders or self.has_three_text_encoders:
            pipe.text_encoder_2 = torch.compile(pipe.text_encoder_2, mode="reduce-overhead", fullgraph=True)

        # Just makes sure it works.
        _ = pipe(**inputs, generator=torch.manual_seed(0))[0]

    def test_modify_padding_mode(self):
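        # Swap every Conv2d in the VAE and UNet to "circular" padding (which wraps activations around the
        # tensor edges, e.g. for tileable outputs) and make sure the pipeline still runs end to end.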
        def set_pad_mode(network, mode="circular"):
            for _, module in network.named_modules():
                if isinstance(module, torch.nn.Conv2d):
                    module.padding_mode = mode

        components, _, _ = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _pad_mode = "circular"
        set_pad_mode(pipe.vae, _pad_mode)
        set_pad_mode(pipe.unet, _pad_mode)

        _, _, inputs = self.get_dummy_inputs()
        _ = pipe(**inputs)[0]

    def test_logs_info_when_no_lora_keys_found(self):
        # Skip text encoder check for now as that is handled with `transformers`.
        components, _, _ = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        _, _, inputs = self.get_dummy_inputs(with_generator=False)
        output_no_lora = self.get_base_pipe_output()

        no_op_state_dict = {"lora_foo": torch.tensor(2.0), "lora_bar": torch.tensor(3.0)}
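        # None of these keys look like LoRA weights for any component, so loading should only log an
        # informational "No LoRA keys associated to ..." message and leave the outputs unchanged.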
        logger = logging.get_logger("diffusers.loaders.peft")
        logger.setLevel(logging.WARNING)

        with CaptureLogger(logger) as cap_logger:
            pipe.load_lora_weights(no_op_state_dict)
        out_after_lora_attempt = pipe(**inputs, generator=torch.manual_seed(0))[0]

        denoiser = getattr(pipe, "unet") if self.unet_kwargs is not None else getattr(pipe, "transformer")
        self.assertTrue(cap_logger.out.startswith(f"No LoRA keys associated to {denoiser.__class__.__name__}"))
        self.assertTrue(np.allclose(output_no_lora, out_after_lora_attempt, atol=1e-5, rtol=1e-5))

        # test only for text encoder
        for lora_module in self.pipeline_class._lora_loadable_modules:
            if "text_encoder" in lora_module:
                text_encoder = getattr(pipe, lora_module)
                if lora_module == "text_encoder":
                    prefix = "text_encoder"
                elif lora_module == "text_encoder_2":
                    prefix = "text_encoder_2"

                logger = logging.get_logger("diffusers.loaders.lora_base")
                logger.setLevel(logging.WARNING)

                with CaptureLogger(logger) as cap_logger:
                    self.pipeline_class.load_lora_into_text_encoder(
                        no_op_state_dict, network_alphas=None, text_encoder=text_encoder, prefix=prefix
                    )

                self.assertTrue(
                    cap_logger.out.startswith(f"No LoRA keys associated to {text_encoder.__class__.__name__}")
                )

    def test_set_adapters_match_attention_kwargs(self):
        """Test to check if outputs after `set_adapters()` and attention kwargs match."""
        attention_kwargs_name = determine_attention_kwargs_name(self.pipeline_class)
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()
        pipe, _ = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)

        lora_scale = 0.5
        attention_kwargs = {attention_kwargs_name: {"scale": lora_scale}}
        output_lora_scale = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0]
        self.assertFalse(
            np.allclose(output_no_lora, output_lora_scale, atol=1e-3, rtol=1e-3),
            "Lora + scale should change the output",
        )

        pipe.set_adapters("default", lora_scale)
        output_lora_scale_wo_kwargs = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(
            not np.allclose(output_no_lora, output_lora_scale_wo_kwargs, atol=1e-3, rtol=1e-3),
            "Lora + scale should change the output",
        )
        self.assertTrue(
            np.allclose(output_lora_scale, output_lora_scale_wo_kwargs, atol=1e-3, rtol=1e-3),
            "Lora + scale should match the output of `set_adapters()`.",
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
            lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
            self.pipeline_class.save_lora_weights(
                save_directory=tmpdirname, safe_serialization=True, **lora_state_dicts
            )

            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")))
            pipe = self.pipeline_class(**components)
            pipe = pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)
            pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))

            for module_name, module in modules_to_save.items():
                self.assertTrue(check_if_lora_correctly_set(module), f"Lora not correctly set in {module_name}")

            output_lora_from_pretrained = pipe(**inputs, generator=torch.manual_seed(0), **attention_kwargs)[0]
            self.assertTrue(
                not np.allclose(output_no_lora, output_lora_from_pretrained, atol=1e-3, rtol=1e-3),
                "Lora + scale should change the output",
            )
            self.assertTrue(
                np.allclose(output_lora_scale, output_lora_from_pretrained, atol=1e-3, rtol=1e-3),
                "Loading from saved checkpoints should give same results as attention_kwargs.",
            )
            self.assertTrue(
                np.allclose(output_lora_scale_wo_kwargs, output_lora_from_pretrained, atol=1e-3, rtol=1e-3),
                "Loading from saved checkpoints should give same results as set_adapters().",
            )

    @require_peft_version_greater("0.13.2")
    def test_lora_B_bias(self):
        # Currently, this test is only relevant for Flux Control LoRA as we are not
        # aware of any other LoRA checkpoint that has its `lora_B` biases trained.
        components, _, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # keep track of the bias values of the base layers to perform checks later.
        bias_values = {}
        denoiser = pipe.unet if self.unet_kwargs is not None else pipe.transformer
        for name, module in denoiser.named_modules():
            if any(k in name for k in self.denoiser_target_modules):
                if module.bias is not None:
                    bias_values[name] = module.bias.data.clone()

        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        original_output = pipe(**inputs, generator=torch.manual_seed(0))[0]

        denoiser_lora_config.lora_bias = False
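        # `lora_bias=False` keeps the `lora_B` projection bias-free; the second pass below flips it to
        # True, so the base output and the two adapter outputs should all differ from one another.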
        if self.unet_kwargs is not None:
            pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
        else:
            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
        lora_bias_false_output = pipe(**inputs, generator=torch.manual_seed(0))[0]
        pipe.delete_adapters("adapter-1")

        denoiser_lora_config.lora_bias = True
        if self.unet_kwargs is not None:
            pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
        else:
            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
        lora_bias_true_output = pipe(**inputs, generator=torch.manual_seed(0))[0]

        self.assertFalse(np.allclose(original_output, lora_bias_false_output, atol=1e-3, rtol=1e-3))
        self.assertFalse(np.allclose(original_output, lora_bias_true_output, atol=1e-3, rtol=1e-3))
        self.assertFalse(np.allclose(lora_bias_false_output, lora_bias_true_output, atol=1e-3, rtol=1e-3))

    def test_correct_lora_configs_with_different_ranks(self):
        components, _, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        original_output = pipe(**inputs, generator=torch.manual_seed(0))[0]

        if self.unet_kwargs is not None:
            pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
        else:
            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")

        lora_output_same_rank = pipe(**inputs, generator=torch.manual_seed(0))[0]

        if self.unet_kwargs is not None:
            pipe.unet.delete_adapters("adapter-1")
        else:
            pipe.transformer.delete_adapters("adapter-1")

        denoiser = pipe.unet if self.unet_kwargs is not None else pipe.transformer
        for name, _ in denoiser.named_modules():
            if "to_k" in name and "attn" in name and "lora" not in name:
                module_name_to_rank_update = name.replace(".base_layer.", ".")
                break

        # change the rank_pattern
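        # `rank_pattern` lets a single adapter use a different rank for specific modules, e.g.
        # {"blocks.0.attn.to_k": 8} (illustrative name; the actual key is derived above) while every
        # other targeted module keeps the default `r`.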
        updated_rank = denoiser_lora_config.r * 2
        denoiser_lora_config.rank_pattern = {module_name_to_rank_update: updated_rank}

        if self.unet_kwargs is not None:
            pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
            updated_rank_pattern = pipe.unet.peft_config["adapter-1"].rank_pattern
        else:
            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
            updated_rank_pattern = pipe.transformer.peft_config["adapter-1"].rank_pattern

        self.assertTrue(updated_rank_pattern == {module_name_to_rank_update: updated_rank})

        lora_output_diff_rank = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(not np.allclose(original_output, lora_output_same_rank, atol=1e-3, rtol=1e-3))
        self.assertTrue(not np.allclose(lora_output_diff_rank, lora_output_same_rank, atol=1e-3, rtol=1e-3))

        if self.unet_kwargs is not None:
            pipe.unet.delete_adapters("adapter-1")
        else:
            pipe.transformer.delete_adapters("adapter-1")

        # similarly change the alpha_pattern
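        # `alpha_pattern` works the same way for `lora_alpha`, overriding the effective scaling
        # (alpha / r) only for the modules it matches.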
        updated_alpha = denoiser_lora_config.lora_alpha * 2
        denoiser_lora_config.alpha_pattern = {module_name_to_rank_update: updated_alpha}
        if self.unet_kwargs is not None:
            pipe.unet.add_adapter(denoiser_lora_config, "adapter-1")
            self.assertTrue(
                pipe.unet.peft_config["adapter-1"].alpha_pattern == {module_name_to_rank_update: updated_alpha}
            )
        else:
            pipe.transformer.add_adapter(denoiser_lora_config, "adapter-1")
            self.assertTrue(
                pipe.transformer.peft_config["adapter-1"].alpha_pattern == {module_name_to_rank_update: updated_alpha}
            )

        lora_output_diff_alpha = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(not np.allclose(original_output, lora_output_diff_alpha, atol=1e-3, rtol=1e-3))
        self.assertTrue(not np.allclose(lora_output_diff_alpha, lora_output_same_rank, atol=1e-3, rtol=1e-3))

    def test_layerwise_casting_inference_denoiser(self):
        from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS
        from diffusers.hooks.layerwise_casting import DEFAULT_SKIP_MODULES_PATTERN

        def check_linear_dtype(module, storage_dtype, compute_dtype):
            patterns_to_check = DEFAULT_SKIP_MODULES_PATTERN
            if getattr(module, "_skip_layerwise_casting_patterns", None) is not None:
                patterns_to_check += tuple(module._skip_layerwise_casting_patterns)
            for name, submodule in module.named_modules():
                if not isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS):
                    continue
                dtype_to_check = storage_dtype
                if "lora" in name or any(re.search(pattern, name) for pattern in patterns_to_check):
                    dtype_to_check = compute_dtype
                if getattr(submodule, "weight", None) is not None:
                    self.assertEqual(submodule.weight.dtype, dtype_to_check)
                if getattr(submodule, "bias", None) is not None:
                    self.assertEqual(submodule.bias.dtype, dtype_to_check)

        def initialize_pipeline(storage_dtype=None, compute_dtype=torch.float32):
            components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
            pipe = self.pipeline_class(**components)
            pipe = pipe.to(torch_device, dtype=compute_dtype)
            pipe.set_progress_bar_config(disable=None)

            pipe, denoiser = self.add_adapters_to_pipeline(pipe, text_lora_config, denoiser_lora_config)

            if storage_dtype is not None:
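                # Layerwise casting stores weights in `storage_dtype` (e.g. float8_e4m3fn) and upcasts them
                # to `compute_dtype` on the fly during forward; LoRA layers are skipped here, so they are
                # checked against `compute_dtype` below.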
                denoiser.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype)
                check_linear_dtype(denoiser, storage_dtype, compute_dtype)

            return pipe

        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        pipe_fp32 = initialize_pipeline(storage_dtype=None)
        pipe_fp32(**inputs, generator=torch.manual_seed(0))[0]

        pipe_float8_e4m3_fp32 = initialize_pipeline(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.float32)
        pipe_float8_e4m3_fp32(**inputs, generator=torch.manual_seed(0))[0]

        pipe_float8_e4m3_bf16 = initialize_pipeline(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16)
        pipe_float8_e4m3_bf16(**inputs, generator=torch.manual_seed(0))[0]

    @require_peft_version_greater("0.14.0")
    def test_layerwise_casting_peft_input_autocast_denoiser(self):
        r"""
        A test that checks if layerwise casting works correctly with PEFT layers and forward pass does not fail. This
        is different from `test_layerwise_casting_inference_denoiser` as that disables the application of layerwise
        cast hooks on the PEFT layers (relevant logic in `models.modeling_utils.ModelMixin.enable_layerwise_casting`).
        In this test, we enable the layerwise casting on the PEFT layers as well. If run with PEFT version <= 0.14.0,
        this test will fail with the following error:

        ```
        RuntimeError: expected mat1 and mat2 to have the same dtype, but got: c10::Float8_e4m3fn != float
        ```

        See the docstring of [`hooks.layerwise_casting.PeftInputAutocastDisableHook`] for more details.
        """

        from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS
        from diffusers.hooks.layerwise_casting import (
            _PEFT_AUTOCAST_DISABLE_HOOK,
            DEFAULT_SKIP_MODULES_PATTERN,
            apply_layerwise_casting,
        )

        storage_dtype = torch.float8_e4m3fn
        compute_dtype = torch.float32

        def check_module(denoiser):
            # This will also check if the peft layers are in torch.float8_e4m3fn dtype (unlike test_layerwise_casting_inference_denoiser)
            for name, module in denoiser.named_modules():
                if not isinstance(module, _GO_LC_SUPPORTED_PYTORCH_LAYERS):
                    continue
                dtype_to_check = storage_dtype
                if any(re.search(pattern, name) for pattern in patterns_to_check):
                    dtype_to_check = compute_dtype
                if getattr(module, "weight", None) is not None:
                    self.assertEqual(module.weight.dtype, dtype_to_check)
                if getattr(module, "bias", None) is not None:
                    self.assertEqual(module.bias.dtype, dtype_to_check)
                if isinstance(module, BaseTunerLayer):
                    self.assertTrue(getattr(module, "_diffusers_hook", None) is not None)
                    self.assertTrue(module._diffusers_hook.get_hook(_PEFT_AUTOCAST_DISABLE_HOOK) is not None)

        # 1. Test forward with add_adapter
        components, _, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device, dtype=compute_dtype)
        pipe.set_progress_bar_config(disable=None)

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config)
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        patterns_to_check = DEFAULT_SKIP_MODULES_PATTERN
        if getattr(denoiser, "_skip_layerwise_casting_patterns", None) is not None:
            patterns_to_check += tuple(denoiser._skip_layerwise_casting_patterns)

        apply_layerwise_casting(
            denoiser, storage_dtype=storage_dtype, compute_dtype=compute_dtype, skip_modules_pattern=patterns_to_check
        )
        check_module(denoiser)

        _, _, inputs = self.get_dummy_inputs(with_generator=False)
        pipe(**inputs, generator=torch.manual_seed(0))[0]

        # 2. Test forward with load_lora_weights
        with tempfile.TemporaryDirectory() as tmpdirname:
            modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
            lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
            self.pipeline_class.save_lora_weights(
                save_directory=tmpdirname, safe_serialization=True, **lora_state_dicts
            )

            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")))
            components, _, _ = self.get_dummy_components()
            pipe = self.pipeline_class(**components)
            pipe = pipe.to(torch_device, dtype=compute_dtype)
            pipe.set_progress_bar_config(disable=None)
            pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))

            denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
            apply_layerwise_casting(
                denoiser,
                storage_dtype=storage_dtype,
                compute_dtype=compute_dtype,
                skip_modules_pattern=patterns_to_check,
            )
            check_module(denoiser)

            _, _, inputs = self.get_dummy_inputs(with_generator=False)
            pipe(**inputs, generator=torch.manual_seed(0))[0]

    @parameterized.expand([4, 8, 16])
    def test_lora_adapter_metadata_is_loaded_correctly(self, lora_alpha):
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components(lora_alpha=lora_alpha)
        pipe = self.pipeline_class(**components)

        pipe, _ = self.add_adapters_to_pipeline(
            pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config
        )

        with tempfile.TemporaryDirectory() as tmpdir:
            modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
            lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
            lora_metadatas = self._get_lora_adapter_metadata(modules_to_save)
            self.pipeline_class.save_lora_weights(save_directory=tmpdir, **lora_state_dicts, **lora_metadatas)
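            # The adapter metadata (e.g. rank and `lora_alpha`) is serialized alongside the weights so it can be
            # parsed back via `return_lora_metadata=True` below.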
            pipe.unload_lora_weights()

            out = pipe.lora_state_dict(tmpdir, return_lora_metadata=True)
            if len(out) == 3:
                _, _, parsed_metadata = out
            elif len(out) == 2:
                _, parsed_metadata = out

            denoiser_key = (
                self.pipeline_class.transformer_name
                if self.transformer_kwargs is not None
                else self.pipeline_class.unet_name
            )
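            # Parsed metadata keys are prefixed with the component name (e.g. "transformer." or "unet.").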
            self.assertTrue(any(k.startswith(f"{denoiser_key}.") for k in parsed_metadata))
            check_module_lora_metadata(
                parsed_metadata=parsed_metadata, lora_metadatas=lora_metadatas, module_key=denoiser_key
            )

            if "text_encoder" in self.pipeline_class._lora_loadable_modules:
                text_encoder_key = self.pipeline_class.text_encoder_name
                self.assertTrue(any(k.startswith(f"{text_encoder_key}.") for k in parsed_metadata))
                check_module_lora_metadata(
                    parsed_metadata=parsed_metadata, lora_metadatas=lora_metadatas, module_key=text_encoder_key
                )

            if "text_encoder_2" in self.pipeline_class._lora_loadable_modules:
                text_encoder_2_key = "text_encoder_2"
                self.assertTrue(any(k.startswith(f"{text_encoder_2_key}.") for k in parsed_metadata))
                check_module_lora_metadata(
                    parsed_metadata=parsed_metadata, lora_metadatas=lora_metadatas, module_key=text_encoder_2_key
                )

    @parameterized.expand([4, 8, 16])
    def test_lora_adapter_metadata_save_load_inference(self, lora_alpha):
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components(lora_alpha=lora_alpha)
        pipe = self.pipeline_class(**components).to(torch_device)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        pipe, _ = self.add_adapters_to_pipeline(
            pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config
        )
        output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
            lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
            lora_metadatas = self._get_lora_adapter_metadata(modules_to_save)
            self.pipeline_class.save_lora_weights(save_directory=tmpdir, **lora_state_dicts, **lora_metadatas)
            pipe.unload_lora_weights()
            pipe.load_lora_weights(tmpdir)
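            # Since the metadata (including `lora_alpha`) round-trips through save/load, the reloaded LoRA should
            # reproduce the original LoRA outputs.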

            output_lora_pretrained = pipe(**inputs, generator=torch.manual_seed(0))[0]

            self.assertTrue(
                np.allclose(output_lora, output_lora_pretrained, atol=1e-3, rtol=1e-3), "Lora outputs should match."
            )

    def test_lora_unload_add_adapter(self):
        """Tests if `unload_lora_weights()` -> `add_adapter()` works."""
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components).to(torch_device)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        pipe, _ = self.add_adapters_to_pipeline(
            pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config
        )
        _ = pipe(**inputs, generator=torch.manual_seed(0))[0]

        # unload and then add.
        pipe.unload_lora_weights()
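        # `unload_lora_weights()` removes the injected LoRA layers, so the adapters can be added again from a clean state.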
        pipe, _ = self.add_adapters_to_pipeline(
            pipe, text_lora_config=text_lora_config, denoiser_lora_config=denoiser_lora_config
        )
        _ = pipe(**inputs, generator=torch.manual_seed(0))[0]

    def test_inference_load_delete_load_adapters(self):
        "Tests if `load_lora_weights()` -> `delete_adapters()` -> `load_lora_weights()` works."
        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        _, _, inputs = self.get_dummy_inputs(with_generator=False)

        output_no_lora = self.get_base_pipe_output()

        if "text_encoder" in self.pipeline_class._lora_loadable_modules:
            pipe.text_encoder.add_adapter(text_lora_config)
            self.assertTrue(check_if_lora_correctly_set(pipe.text_encoder), "Lora not correctly set in text encoder")

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config)
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        if self.has_two_text_encoders or self.has_three_text_encoders:
            lora_loadable_components = self.pipeline_class._lora_loadable_modules
            if "text_encoder_2" in lora_loadable_components:
                pipe.text_encoder_2.add_adapter(text_lora_config)
                self.assertTrue(
                    check_if_lora_correctly_set(pipe.text_encoder_2), "Lora not correctly set in text encoder 2"
                )

        output_adapter_1 = pipe(**inputs, generator=torch.manual_seed(0))[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
            lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
            self.pipeline_class.save_lora_weights(save_directory=tmpdirname, **lora_state_dicts)
            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")))
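            # Save the LoRA weights before deleting the adapter so they can be re-loaded afterwards.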

            # First, delete adapter and compare.
            pipe.delete_adapters(pipe.get_active_adapters()[0])
            output_no_adapter = pipe(**inputs, generator=torch.manual_seed(0))[0]
            self.assertFalse(np.allclose(output_adapter_1, output_no_adapter, atol=1e-3, rtol=1e-3))
            self.assertTrue(np.allclose(output_no_lora, output_no_adapter, atol=1e-3, rtol=1e-3))

            # Then load adapter and compare.
            pipe.load_lora_weights(tmpdirname)
            output_lora_loaded = pipe(**inputs, generator=torch.manual_seed(0))[0]
            self.assertTrue(np.allclose(output_adapter_1, output_lora_loaded, atol=1e-3, rtol=1e-3))

    def _test_group_offloading_inference_denoiser(self, offload_type, use_stream):
        from diffusers.hooks.group_offloading import _get_top_level_group_offload_hook

        onload_device = torch_device
        offload_device = torch.device("cpu")

        components, text_lora_config, denoiser_lora_config = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config)
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        with tempfile.TemporaryDirectory() as tmpdirname:
            modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
            lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
            self.pipeline_class.save_lora_weights(
                save_directory=tmpdirname, safe_serialization=True, **lora_state_dicts
            )
            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors")))

            components, _, _ = self.get_dummy_components()
            pipe = self.pipeline_class(**components)
            pipe.set_progress_bar_config(disable=None)
            denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet

            pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))
            self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")
            _, _, inputs = self.get_dummy_inputs(with_generator=False)

            # Test group offloading with load_lora_weights
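            # `num_blocks_per_group=1` is only meaningful for "block_level" offloading; "leaf_level" offloads each
            # leaf module individually.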
            denoiser.enable_group_offload(
                onload_device=onload_device,
                offload_device=offload_device,
                offload_type=offload_type,
                num_blocks_per_group=1,
                use_stream=use_stream,
            )
            # Place other model-level components on `torch_device`.
            for _, component in pipe.components.items():
                if isinstance(component, torch.nn.Module):
                    component.to(torch_device)
            group_offload_hook_1 = _get_top_level_group_offload_hook(denoiser)
            self.assertTrue(group_offload_hook_1 is not None)
            output_1 = pipe(**inputs, generator=torch.manual_seed(0))[0]

            # Test group offloading after removing the lora
            pipe.unload_lora_weights()
            group_offload_hook_2 = _get_top_level_group_offload_hook(denoiser)
            self.assertTrue(group_offload_hook_2 is not None)
            output_2 = pipe(**inputs, generator=torch.manual_seed(0))[0]  # noqa: F841

            # Add the lora again and check if group offloading works
            pipe.load_lora_weights(os.path.join(tmpdirname, "pytorch_lora_weights.safetensors"))
            self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")
            group_offload_hook_3 = _get_top_level_group_offload_hook(denoiser)
            self.assertTrue(group_offload_hook_3 is not None)
            output_3 = pipe(**inputs, generator=torch.manual_seed(0))[0]
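            # Re-loading the LoRA should restore the original LoRA outputs, which is checked below.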

            self.assertTrue(np.allclose(output_1, output_3, atol=1e-3, rtol=1e-3))

    @parameterized.expand([("block_level", True), ("leaf_level", False), ("leaf_level", True)])
    @require_torch_accelerator
    def test_group_offloading_inference_denoiser(self, offload_type, use_stream):
        for cls in inspect.getmro(self.__class__):
            if "test_group_offloading_inference_denoiser" in cls.__dict__ and cls is not PeftLoraLoaderMixinTests:
                # Skip this test if a child class overrides it. We need to do this because `parameterized`
                # materializes the test methods at invocation time, so the materialized methods cannot be overridden.
                return
        self._test_group_offloading_inference_denoiser(offload_type, use_stream)

    @require_torch_accelerator
    def test_lora_loading_model_cpu_offload(self):
        components, _, denoiser_lora_config = self.get_dummy_components()
        _, _, inputs = self.get_dummy_inputs(with_generator=False)
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
        denoiser.add_adapter(denoiser_lora_config)
        self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        output_lora = pipe(**inputs, generator=torch.manual_seed(0))[0]

        with tempfile.TemporaryDirectory() as tmpdirname:
            modules_to_save = self._get_modules_to_save(pipe, has_denoiser=True)
            lora_state_dicts = self._get_lora_state_dicts(modules_to_save)
            self.pipeline_class.save_lora_weights(
                save_directory=tmpdirname, safe_serialization=True, **lora_state_dicts
            )
            # reinitialize the pipeline to mimic the inference workflow.
            components, _, denoiser_lora_config = self.get_dummy_components()
            pipe = self.pipeline_class(**components)
            pipe.enable_model_cpu_offload(device=torch_device)
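            # Loading LoRA weights should work even when model CPU offloading is already enabled.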
            pipe.load_lora_weights(tmpdirname)
            denoiser = pipe.transformer if self.unet_kwargs is None else pipe.unet
            self.assertTrue(check_if_lora_correctly_set(denoiser), "Lora not correctly set in denoiser.")

        output_lora_loaded = pipe(**inputs, generator=torch.manual_seed(0))[0]
        self.assertTrue(np.allclose(output_lora, output_lora_loaded, atol=1e-3, rtol=1e-3))