# coding=utf-8
# Copyright 2025 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import gc
import glob
import inspect
import json
import os
import re
import tempfile
import traceback
import unittest
import unittest.mock as mock
import uuid
from collections import defaultdict
from typing import Dict, List, Optional, Tuple, Union

import numpy as np
import pytest
import requests_mock
import safetensors.torch
import torch
import torch.nn as nn
from accelerate.utils.modeling import _get_proper_dtype, compute_module_sizes, dtype_byte_size
from huggingface_hub import ModelCard, delete_repo, snapshot_download, try_to_load_from_cache
from huggingface_hub.utils import HfHubHTTPError, is_jinja_available
from parameterized import parameterized

from diffusers.models import FluxTransformer2DModel, SD3Transformer2DModel, UNet2DConditionModel
from diffusers.models.attention_processor import (
    AttnProcessor,
    AttnProcessor2_0,
    AttnProcessorNPU,
    XFormersAttnProcessor,
)
from diffusers.models.auto_model import AutoModel
from diffusers.models.modeling_outputs import BaseOutput
from diffusers.training_utils import EMAModel
from diffusers.utils import (
    SAFE_WEIGHTS_INDEX_NAME,
    WEIGHTS_INDEX_NAME,
    is_peft_available,
    is_torch_npu_available,
    is_xformers_available,
    logging,
)
from diffusers.utils.hub_utils import _add_variant
from diffusers.utils.torch_utils import get_torch_cuda_device_capability

from ..others.test_utils import TOKEN, USER, is_staging_test
from ..testing_utils import (
    CaptureLogger,
    _check_safetensors_serialization,
    backend_empty_cache,
    backend_max_memory_allocated,
    backend_reset_peak_memory_stats,
    backend_synchronize,
    check_if_dicts_are_equal,
    get_python_version,
    is_torch_compile,
    numpy_cosine_similarity_distance,
    require_peft_backend,
    require_peft_version_greater,
    require_torch_2,
    require_torch_accelerator,
    require_torch_accelerator_with_training,
    require_torch_multi_accelerator,
    require_torch_version_greater,
    run_test_in_subprocess,
    slow,
    torch_all_close,
    torch_device,
)


if is_peft_available():
    from peft.tuners.tuners_utils import BaseTunerLayer


def caculate_expected_num_shards(index_map_path):
    with open(index_map_path) as f:
        weight_map_dict = json.load(f)["weight_map"]
    first_key = list(weight_map_dict.keys())[0]
    weight_loc = weight_map_dict[first_key]  # e.g., diffusion_pytorch_model-00001-of-00002.safetensors
    expected_num_shards = int(weight_loc.split("-")[-1].split(".")[0])
    return expected_num_shards
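

# Illustrative sketch (not called by the test suite): the helper above parses the shard
# count from the `-<shard>-of-<total>` suffix of any entry in `weight_map`. The index
# content below is a made-up two-shard example.
def _example_expected_num_shards(tmp_dir):
    index = {"weight_map": {"conv_in.weight": "diffusion_pytorch_model-00001-of-00002.safetensors"}}
    index_path = os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)
    with open(index_path, "w") as f:
        json.dump(index, f)
    assert caculate_expected_num_shards(index_path) == 2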


def check_if_lora_correctly_set(model) -> bool:
    """
    Checks if the LoRA layers are correctly set with peft
    """
    for module in model.modules():
        if isinstance(module, BaseTunerLayer):
            return True
    return False
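

# Illustrative sketch (not called by the test suite, requires `peft`): `model` is any
# diffusers model inheriting from `PeftAdapterMixin` and `lora_config` a `peft.LoraConfig`.
def _example_check_if_lora_correctly_set(model, lora_config):
    model.add_adapter(lora_config)
    assert check_if_lora_correctly_set(model)
    model.unload_lora()
    assert not check_if_lora_correctly_set(model)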


def normalize_output(out):
    out0 = out[0] if isinstance(out, (BaseOutput, tuple)) else out
    return torch.stack(out0) if isinstance(out0, list) else out0
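

# Illustrative sketch (not called by the test suite): the helper above unwraps the first
# element of a tuple/`BaseOutput` and stacks list outputs, so the tests can compare
# different model return types with a single code path.
def _example_normalize_output():
    sample = torch.ones(2, 3)
    assert torch.equal(normalize_output((sample,)), sample)
    assert normalize_output([sample, sample]).shape == (2, 2, 3)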


# Will be run via run_test_in_subprocess
def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout):
    error = None
    try:
        init_dict, model_class = in_queue.get(timeout=timeout)

        model = model_class(**init_dict)
        model.to(torch_device)
        model = torch.compile(model)

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, safe_serialization=False)
            new_model = model_class.from_pretrained(tmpdirname)
            new_model.to(torch_device)

        assert new_model.__class__ == model_class
    except Exception:
        error = f"{traceback.format_exc()}"

    results = {"error": error}
    out_queue.put(results, timeout=timeout)
    out_queue.join()


def named_persistent_module_tensors(
    module: nn.Module,
    recurse: bool = False,
):
    """
    A helper function that gathers all the tensors (parameters + persistent buffers) of a given module.

    Args:
        module (`torch.nn.Module`):
            The module we want the tensors on.
        recurse (`bool`, *optional*, defaults to `False`):
            Whether or not to go look in every submodule or just return the direct parameters and buffers.
    """
    yield from module.named_parameters(recurse=recurse)

    for named_buffer in module.named_buffers(recurse=recurse):
        name, _ = named_buffer
        # Get parent by splitting on dots and traversing the model
        parent = module
        if "." in name:
            parent_name = name.rsplit(".", 1)[0]
            for part in parent_name.split("."):
                parent = getattr(parent, part)
            name = name.split(".")[-1]
        if name not in parent._non_persistent_buffers_set:
            yield named_buffer
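

# Illustrative sketch (not called by the test suite): non-persistent buffers are skipped,
# so only parameters and persistent buffers (e.g. BatchNorm running stats) are yielded.
def _example_named_persistent_module_tensors():
    module = nn.BatchNorm1d(4)
    module.register_buffer("scratch", torch.zeros(4), persistent=False)
    names = {name for name, _ in named_persistent_module_tensors(module, recurse=True)}
    assert "weight" in names and "running_mean" in names
    assert "scratch" not in names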


def compute_module_persistent_sizes(
    model: nn.Module,
    dtype: Optional[Union[str, torch.device]] = None,
    special_dtypes: Optional[Dict[str, Union[str, torch.device]]] = None,
):
    """
    Compute the size of each submodule of a given model (parameters + persistent buffers).
    """
    if dtype is not None:
        dtype = _get_proper_dtype(dtype)
        dtype_size = dtype_byte_size(dtype)
    if special_dtypes is not None:
        special_dtypes = {key: _get_proper_dtype(dtyp) for key, dtyp in special_dtypes.items()}
        special_dtypes_size = {key: dtype_byte_size(dtyp) for key, dtyp in special_dtypes.items()}
    module_sizes = defaultdict(int)

    module_list = named_persistent_module_tensors(model, recurse=True)

    for name, tensor in module_list:
        if special_dtypes is not None and name in special_dtypes:
            size = tensor.numel() * special_dtypes_size[name]
        elif dtype is None:
            size = tensor.numel() * dtype_byte_size(tensor.dtype)
        elif str(tensor.dtype).startswith(("torch.uint", "torch.int", "torch.bool")):
            # According to the code in set_module_tensor_to_device, these types won't be converted
            # so use their original size here
            size = tensor.numel() * dtype_byte_size(tensor.dtype)
        else:
            size = tensor.numel() * min(dtype_size, dtype_byte_size(tensor.dtype))
        name_parts = name.split(".")
        for idx in range(len(name_parts) + 1):
            module_sizes[".".join(name_parts[:idx])] += size

    return module_sizes
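

# Illustrative sketch (not called by the test suite): a single fp32 `nn.Linear(4, 4)` holds
# 4 * 4 + 4 = 20 elements, i.e. 80 bytes; the root entry "" aggregates all submodules.
def _example_compute_module_persistent_sizes():
    model = nn.Sequential(nn.Linear(4, 4))
    sizes = compute_module_persistent_sizes(model)
    assert sizes["0"] == 80
    assert sizes[""] == 80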


def cast_maybe_tensor_dtype(maybe_tensor, current_dtype, target_dtype):
    if torch.is_tensor(maybe_tensor):
        return maybe_tensor.to(target_dtype) if maybe_tensor.dtype == current_dtype else maybe_tensor
    if isinstance(maybe_tensor, dict):
        return {k: cast_maybe_tensor_dtype(v, current_dtype, target_dtype) for k, v in maybe_tensor.items()}
    if isinstance(maybe_tensor, list):
        return [cast_maybe_tensor_dtype(v, current_dtype, target_dtype) for v in maybe_tensor]
    return maybe_tensor
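

# Illustrative sketch (not called by the test suite): only tensors whose dtype matches
# `current_dtype` are cast, including tensors nested in dicts and lists; everything else
# is returned unchanged.
def _example_cast_maybe_tensor_dtype():
    inputs = {"sample": torch.ones(2, dtype=torch.float16), "timestep": torch.tensor([1]), "flag": True}
    cast = cast_maybe_tensor_dtype(inputs, torch.float16, torch.float32)
    assert cast["sample"].dtype == torch.float32
    assert cast["timestep"].dtype == torch.int64
    assert cast["flag"] is True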


class ModelUtilsTest(unittest.TestCase):
    def tearDown(self):
        super().tearDown()

    def test_missing_key_loading_warning_message(self):
        with self.assertLogs("diffusers.models.modeling_utils", level="WARNING") as logs:
            UNet2DConditionModel.from_pretrained("hf-internal-testing/stable-diffusion-broken", subfolder="unet")

        # make sure that the warning message states which keys are missing
        assert "conv_out.bias" in " ".join(logs.output)

    @parameterized.expand(
        [
            ("hf-internal-testing/tiny-stable-diffusion-pipe-variants-all-kinds", "unet", False),
            ("hf-internal-testing/tiny-stable-diffusion-pipe-variants-all-kinds", "unet", True),
            ("hf-internal-testing/tiny-sd-unet-with-sharded-ckpt", None, False),
            ("hf-internal-testing/tiny-sd-unet-with-sharded-ckpt", None, True),
        ]
    )
    def test_variant_sharded_ckpt_legacy_format_raises_warning(self, repo_id, subfolder, use_local):
        def load_model(path):
            kwargs = {"variant": "fp16"}
            if subfolder:
                kwargs["subfolder"] = subfolder
            return UNet2DConditionModel.from_pretrained(path, **kwargs)

        with self.assertWarns(FutureWarning) as warning:
            if use_local:
                with tempfile.TemporaryDirectory() as tmpdirname:
                    tmpdirname = snapshot_download(repo_id=repo_id)
                    _ = load_model(tmpdirname)
            else:
                _ = load_model(repo_id)

        warning_messages = " ".join(str(w.message) for w in warning.warnings)
        self.assertIn("This serialization format is now deprecated to standardize the serialization", warning_messages)

    # Local tests are already covered down below.
    @parameterized.expand(
        [
            ("hf-internal-testing/tiny-sd-unet-sharded-latest-format", None, "fp16"),
            ("hf-internal-testing/tiny-sd-unet-sharded-latest-format-subfolder", "unet", "fp16"),
            ("hf-internal-testing/tiny-sd-unet-sharded-no-variants", None, None),
            ("hf-internal-testing/tiny-sd-unet-sharded-no-variants-subfolder", "unet", None),
        ]
    )
    def test_variant_sharded_ckpt_loads_from_hub(self, repo_id, subfolder, variant=None):
        def load_model():
            kwargs = {}
            if variant:
                kwargs["variant"] = variant
            if subfolder:
                kwargs["subfolder"] = subfolder
            return UNet2DConditionModel.from_pretrained(repo_id, **kwargs)

        assert load_model()

    def test_cached_files_are_used_when_no_internet(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HfHubHTTPError("Server down", response=mock.Mock())
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        orig_model = UNet2DConditionModel.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet"
        )

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.request", return_value=response_mock):
            # Download this model to make sure it's in the cache.
            model = UNet2DConditionModel.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", subfolder="unet", local_files_only=True
            )

        for p1, p2 in zip(orig_model.parameters(), model.parameters()):
            if p1.data.ne(p2.data).sum() > 0:
                assert False, "Parameters not the same!"

    def test_local_files_only_with_sharded_checkpoint(self):
        repo_id = "hf-internal-testing/tiny-flux-sharded"
        error_response = mock.Mock(
            status_code=500,
            headers={},
            raise_for_status=mock.Mock(side_effect=HfHubHTTPError("Server down", response=mock.Mock())),
            json=mock.Mock(return_value={}),
        )
        client_mock = mock.Mock()
        client_mock.get.return_value = error_response

        with tempfile.TemporaryDirectory() as tmpdir:
            model = FluxTransformer2DModel.from_pretrained(repo_id, subfolder="transformer", cache_dir=tmpdir)

            with mock.patch("huggingface_hub.hf_api.get_session", return_value=client_mock):
                # Should fail with local_files_only=False (network required)
                # We would make a network call with model_info
                with self.assertRaises(OSError):
                    FluxTransformer2DModel.from_pretrained(
                        repo_id, subfolder="transformer", cache_dir=tmpdir, local_files_only=False
                    )

                # Should succeed with local_files_only=True (uses cache)
                # model_info call skipped
                local_model = FluxTransformer2DModel.from_pretrained(
                    repo_id, subfolder="transformer", cache_dir=tmpdir, local_files_only=True
                )

            assert all(torch.equal(p1, p2) for p1, p2 in zip(model.parameters(), local_model.parameters())), (
                "Model parameters don't match!"
            )

            # Remove a shard file
            cached_shard_file = try_to_load_from_cache(
                repo_id, filename="transformer/diffusion_pytorch_model-00001-of-00002.safetensors", cache_dir=tmpdir
            )
            os.remove(cached_shard_file)

            # Attempting to load from cache should raise an error
            with self.assertRaises(OSError) as context:
                FluxTransformer2DModel.from_pretrained(
                    repo_id, subfolder="transformer", cache_dir=tmpdir, local_files_only=True
                )

            # Verify error mentions the missing shard
            error_msg = str(context.exception)
            assert cached_shard_file in error_msg or "required according to the checkpoint index" in error_msg, (
                f"Expected error about missing shard, got: {error_msg}"
            )

    @unittest.skip("Flaky behaviour on CI. Re-enable after migrating to new runners")
    @unittest.skipIf(torch_device == "mps", reason="Test not supported for MPS.")
    def test_one_request_upon_cached(self):
        use_safetensors = False

        with tempfile.TemporaryDirectory() as tmpdirname:
            with requests_mock.mock(real_http=True) as m:
                UNet2DConditionModel.from_pretrained(
                    "hf-internal-testing/tiny-stable-diffusion-torch",
                    subfolder="unet",
                    cache_dir=tmpdirname,
                    use_safetensors=use_safetensors,
                )

            download_requests = [r.method for r in m.request_history]
            assert download_requests.count("HEAD") == 3, (
                "3 HEAD requests: one for the config, one for the model, and one for the shard index file."
            )
            assert download_requests.count("GET") == 2, "2 GET requests: one for the config, one for the model"

            with requests_mock.mock(real_http=True) as m:
                UNet2DConditionModel.from_pretrained(
                    "hf-internal-testing/tiny-stable-diffusion-torch",
                    subfolder="unet",
                    cache_dir=tmpdirname,
                    use_safetensors=use_safetensors,
                )

            cache_requests = [r.method for r in m.request_history]
            assert "HEAD" == cache_requests[0] and len(cache_requests) == 2, (
                "We should call only `model_info` to check for the commit hash and to know if the shard index is present."
            )

    def test_weight_overwrite(self):
        with tempfile.TemporaryDirectory() as tmpdirname, self.assertRaises(ValueError) as error_context:
            UNet2DConditionModel.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch",
                subfolder="unet",
                cache_dir=tmpdirname,
                in_channels=9,
            )

        # make sure that the error message states that the mismatched weights cannot be loaded
        assert "Cannot load" in str(error_context.exception)

        with tempfile.TemporaryDirectory() as tmpdirname:
            model = UNet2DConditionModel.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch",
                subfolder="unet",
                cache_dir=tmpdirname,
                in_channels=9,
                low_cpu_mem_usage=False,
                ignore_mismatched_sizes=True,
            )

        assert model.config.in_channels == 9

    @require_torch_accelerator
    def test_keep_modules_in_fp32(self):
        r"""
        A simple test to check that the modules listed in `_keep_in_fp32_modules` are kept in fp32 when the model is loaded in fp16/bf16.
        Also ensures that inference works.
        """
        fp32_modules = SD3Transformer2DModel._keep_in_fp32_modules

        for torch_dtype in [torch.bfloat16, torch.float16]:
            SD3Transformer2DModel._keep_in_fp32_modules = ["proj_out"]

            model = SD3Transformer2DModel.from_pretrained(
                "hf-internal-testing/tiny-sd3-pipe", subfolder="transformer", torch_dtype=torch_dtype
            ).to(torch_device)

            for name, module in model.named_modules():
                if isinstance(module, torch.nn.Linear):
                    if name in model._keep_in_fp32_modules:
                        self.assertTrue(module.weight.dtype == torch.float32)
                    else:
                        self.assertTrue(module.weight.dtype == torch_dtype)

        def get_dummy_inputs():
            batch_size = 2
            num_channels = 4
            height = width = embedding_dim = 32
            pooled_embedding_dim = embedding_dim * 2
            sequence_length = 154

            hidden_states = torch.randn((batch_size, num_channels, height, width)).to(torch_device)
            encoder_hidden_states = torch.randn((batch_size, sequence_length, embedding_dim)).to(torch_device)
            pooled_prompt_embeds = torch.randn((batch_size, pooled_embedding_dim)).to(torch_device)
            timestep = torch.randint(0, 1000, size=(batch_size,)).to(torch_device)

            return {
                "hidden_states": hidden_states,
                "encoder_hidden_states": encoder_hidden_states,
                "pooled_projections": pooled_prompt_embeds,
                "timestep": timestep,
            }

        # test if inference works.
        with torch.no_grad(), torch.amp.autocast(torch_device, dtype=torch_dtype):
            input_dict_for_transformer = get_dummy_inputs()
            model_inputs = {
                k: v.to(device=torch_device) for k, v in input_dict_for_transformer.items() if not isinstance(v, bool)
            }
            model_inputs.update({k: v for k, v in input_dict_for_transformer.items() if k not in model_inputs})
            _ = model(**model_inputs)

        SD3Transformer2DModel._keep_in_fp32_modules = fp32_modules


class UNetTesterMixin:
    @staticmethod
    def _accepts_norm_num_groups(model_class):
        model_sig = inspect.signature(model_class.__init__)
        accepts_norm_groups = "norm_num_groups" in model_sig.parameters
        return accepts_norm_groups

    def test_forward_with_norm_groups(self):
        if not self._accepts_norm_num_groups(self.model_class):
            pytest.skip(f"Test not supported for {self.model_class.__name__}")
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["norm_num_groups"] = 16
        init_dict["block_out_channels"] = (16, 32)

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            output = model(**inputs_dict)

            if isinstance(output, dict):
                output = output.to_tuple()[0]

        self.assertIsNotNone(output)
        expected_shape = inputs_dict["sample"].shape
        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")


class ModelTesterMixin:
    main_input_name = None  # overwrite in model specific tester class
    base_precision = 1e-3
    forward_requires_fresh_args = False
    model_split_percents = [0.5, 0.7, 0.9]
    uses_custom_attn_processor = False
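
    # Illustrative sketch of a model-specific subclass (the names below are made up):
    #
    #     class MyTransformerModelTests(ModelTesterMixin, unittest.TestCase):
    #         model_class = MyTransformer2DModel
    #         main_input_name = "hidden_states"
    #         output_shape = (4, 32, 32)
    #
    #         def prepare_init_args_and_inputs_for_common(self):
    #             return init_dict, inputs_dict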

    def check_device_map_is_respected(self, model, device_map):
        for param_name, param in model.named_parameters():
            # Find device in device_map
            while len(param_name) > 0 and param_name not in device_map:
                param_name = ".".join(param_name.split(".")[:-1])
            if param_name not in device_map:
                raise ValueError(f"device map is incomplete, it does not contain any device for {param_name}.")

            param_device = device_map[param_name]
            if param_device in ["cpu", "disk"]:
                self.assertEqual(param.device, torch.device("meta"))
            else:
                self.assertEqual(param.device, torch.device(param_device))
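
    # Illustrative usage sketch (the module names below are made up): offloaded entries
    # ("cpu"/"disk") are expected to live on the meta device, everything else on the
    # accelerator index it was assigned to, e.g.
    #
    #     self.check_device_map_is_respected(model, {"": 0})
    #     self.check_device_map_is_respected(model, {"conv_in": 0, "mid_block": "cpu"})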

    def test_from_save_pretrained(self, expected_max_diff=5e-5):
        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()
        model.to(torch_device)
        model.eval()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, safe_serialization=False)
            new_model = self.model_class.from_pretrained(tmpdirname)
            if hasattr(new_model, "set_default_attn_processor"):
                new_model.set_default_attn_processor()
            new_model.to(torch_device)

        with torch.no_grad():
            if self.forward_requires_fresh_args:
                image = model(**self.inputs_dict(0))
            else:
                image = model(**inputs_dict)

            if isinstance(image, dict):
                image = image.to_tuple()[0]

            if self.forward_requires_fresh_args:
                new_image = new_model(**self.inputs_dict(0))
            else:
                new_image = new_model(**inputs_dict)

            if isinstance(new_image, dict):
                new_image = new_image.to_tuple()[0]

            image = normalize_output(image)
            new_image = normalize_output(new_image)

        max_diff = (image - new_image).abs().max().item()
        self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes")

    def test_getattr_is_correct(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)

        # save some things to test
        model.dummy_attribute = 5
        model.register_to_config(test_attribute=5)

        logger = logging.get_logger("diffusers.models.modeling_utils")
        # 30 for warning
        logger.setLevel(30)
        with CaptureLogger(logger) as cap_logger:
            assert hasattr(model, "dummy_attribute")
            assert getattr(model, "dummy_attribute") == 5
            assert model.dummy_attribute == 5

        # no warning should be thrown
        assert cap_logger.out == ""

        logger = logging.get_logger("diffusers.models.modeling_utils")
        # 30 for warning
        logger.setLevel(30)
        with CaptureLogger(logger) as cap_logger:
            assert hasattr(model, "save_pretrained")
            fn = model.save_pretrained
            fn_1 = getattr(model, "save_pretrained")

            assert fn == fn_1
        # no warning should be thrown
        assert cap_logger.out == ""

        # warning should be thrown
        with self.assertWarns(FutureWarning):
            assert model.test_attribute == 5

        with self.assertWarns(FutureWarning):
            assert getattr(model, "test_attribute") == 5

        with self.assertRaises(AttributeError) as error:
            model.does_not_exist

        assert str(error.exception) == f"'{type(model).__name__}' object has no attribute 'does_not_exist'"

    @unittest.skipIf(
        torch_device != "npu" or not is_torch_npu_available(),
        reason="torch npu flash attention is only available with NPU and `torch_npu` installed",
    )
    def test_set_torch_npu_flash_attn_processor_determinism(self):
        torch.use_deterministic_algorithms(False)
        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)
        model.to(torch_device)

        if not hasattr(model, "set_attn_processor"):
            # If the model does not have `set_attn_processor`, skip the test
            return

        model.set_default_attn_processor()
        assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output = model(**self.inputs_dict(0))[0]
            else:
                output = model(**inputs_dict)[0]

        model.enable_npu_flash_attention()
        assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_2 = model(**self.inputs_dict(0))[0]
            else:
                output_2 = model(**inputs_dict)[0]

        model.set_attn_processor(AttnProcessorNPU())
        assert all(type(proc) == AttnProcessorNPU for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_3 = model(**self.inputs_dict(0))[0]
            else:
                output_3 = model(**inputs_dict)[0]

        torch.use_deterministic_algorithms(True)

        assert torch.allclose(output, output_2, atol=self.base_precision)
        assert torch.allclose(output, output_3, atol=self.base_precision)
        assert torch.allclose(output_2, output_3, atol=self.base_precision)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_set_xformers_attn_processor_for_determinism(self):
        torch.use_deterministic_algorithms(False)
        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)
        model.to(torch_device)

        if not hasattr(model, "set_attn_processor"):
            # If the model does not have `set_attn_processor`, skip the test
            return

        if not hasattr(model, "set_default_attn_processor"):
            # If the model does not have `set_default_attn_processor`, skip the test
            return

        model.set_default_attn_processor()
        assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output = model(**self.inputs_dict(0))[0]
            else:
                output = model(**inputs_dict)[0]

        model.enable_xformers_memory_efficient_attention()
        assert all(type(proc) == XFormersAttnProcessor for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_2 = model(**self.inputs_dict(0))[0]
            else:
                output_2 = model(**inputs_dict)[0]

        model.set_attn_processor(XFormersAttnProcessor())
        assert all(type(proc) == XFormersAttnProcessor for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_3 = model(**self.inputs_dict(0))[0]
            else:
                output_3 = model(**inputs_dict)[0]

        torch.use_deterministic_algorithms(True)

        assert torch.allclose(output, output_2, atol=self.base_precision)
        assert torch.allclose(output, output_3, atol=self.base_precision)
        assert torch.allclose(output_2, output_3, atol=self.base_precision)

    @require_torch_accelerator
    def test_set_attn_processor_for_determinism(self):
        if self.uses_custom_attn_processor:
            return

        torch.use_deterministic_algorithms(False)
        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)

        model.to(torch_device)

        if not hasattr(model, "set_attn_processor"):
            # If the model does not have `set_attn_processor`, skip the test
            return

        assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_1 = model(**self.inputs_dict(0))[0]
            else:
                output_1 = model(**inputs_dict)[0]

        model.set_default_attn_processor()
        assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_2 = model(**self.inputs_dict(0))[0]
            else:
                output_2 = model(**inputs_dict)[0]

        model.set_attn_processor(AttnProcessor2_0())
        assert all(type(proc) == AttnProcessor2_0 for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_4 = model(**self.inputs_dict(0))[0]
            else:
                output_4 = model(**inputs_dict)[0]

        model.set_attn_processor(AttnProcessor())
        assert all(type(proc) == AttnProcessor for proc in model.attn_processors.values())
        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_5 = model(**self.inputs_dict(0))[0]
            else:
                output_5 = model(**inputs_dict)[0]

        torch.use_deterministic_algorithms(True)

        # make sure that outputs match
        assert torch.allclose(output_2, output_1, atol=self.base_precision)
        assert torch.allclose(output_2, output_4, atol=self.base_precision)
        assert torch.allclose(output_2, output_5, atol=self.base_precision)

    def test_from_save_pretrained_variant(self, expected_max_diff=5e-5):
        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        model.to(torch_device)
        model.eval()

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, variant="fp16", safe_serialization=False)
            new_model = self.model_class.from_pretrained(tmpdirname, variant="fp16")
            if hasattr(new_model, "set_default_attn_processor"):
                new_model.set_default_attn_processor()

            # non-variant cannot be loaded
            with self.assertRaises(OSError) as error_context:
                self.model_class.from_pretrained(tmpdirname)

            # make sure that the error message states which file is missing
            assert "Error no file named diffusion_pytorch_model.bin found in directory" in str(error_context.exception)

            new_model.to(torch_device)

        with torch.no_grad():
            if self.forward_requires_fresh_args:
                image = model(**self.inputs_dict(0))
            else:
                image = model(**inputs_dict)
            if isinstance(image, dict):
                image = image.to_tuple()[0]

            if self.forward_requires_fresh_args:
                new_image = new_model(**self.inputs_dict(0))
            else:
                new_image = new_model(**inputs_dict)

            if isinstance(new_image, dict):
                new_image = new_image.to_tuple()[0]

            image = normalize_output(image)
            new_image = normalize_output(new_image)

        max_diff = (image - new_image).abs().max().item()
        self.assertLessEqual(max_diff, expected_max_diff, "Models give different forward passes")

    @is_torch_compile
    @require_torch_2
    @unittest.skipIf(
        get_python_version() == (3, 12),
        reason="Torch Dynamo isn't yet supported for Python 3.12.",
    )
    def test_from_save_pretrained_dynamo(self):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        inputs = [init_dict, self.model_class]
        run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=inputs)

    def test_from_save_pretrained_dtype(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        for dtype in [torch.float32, torch.float16, torch.bfloat16]:
            if torch_device == "mps" and dtype == torch.bfloat16:
                continue
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.to(dtype)
                model.save_pretrained(tmpdirname, safe_serialization=False)
                new_model = self.model_class.from_pretrained(tmpdirname, low_cpu_mem_usage=True, torch_dtype=dtype)
                assert new_model.dtype == dtype
                if (
                    hasattr(self.model_class, "_keep_in_fp32_modules")
                    and self.model_class._keep_in_fp32_modules is None
                ):
                    new_model = self.model_class.from_pretrained(
                        tmpdirname, low_cpu_mem_usage=False, torch_dtype=dtype
                    )
                    assert new_model.dtype == dtype

    def test_determinism(self, expected_max_diff=1e-5):
        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            if self.forward_requires_fresh_args:
                first = model(**self.inputs_dict(0))
            else:
                first = model(**inputs_dict)
            if isinstance(first, dict):
                first = first.to_tuple()[0]

            if self.forward_requires_fresh_args:
                second = model(**self.inputs_dict(0))
            else:
                second = model(**inputs_dict)
            if isinstance(second, dict):
                second = second.to_tuple()[0]

            first = normalize_output(first)
            second = normalize_output(second)

        out_1 = first.cpu().numpy()
        out_2 = second.cpu().numpy()
        out_1 = out_1[~np.isnan(out_1)]
        out_2 = out_2[~np.isnan(out_2)]
        max_diff = np.amax(np.abs(out_1 - out_2))
        self.assertLessEqual(max_diff, expected_max_diff)

    def test_output(self, expected_output_shape=None):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            output = model(**inputs_dict)

            if isinstance(output, dict):
                output = output.to_tuple()[0]
            if isinstance(output, list):
                output = torch.stack(output)

        self.assertIsNotNone(output)

        # input & output have to have the same shape
        input_tensor = inputs_dict[self.main_input_name]
        if isinstance(input_tensor, list):
            input_tensor = torch.stack(input_tensor)

        if expected_output_shape is None:
            expected_shape = input_tensor.shape
            self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")
        else:
            self.assertEqual(output.shape, expected_output_shape, "Input and output shapes do not match")

    def test_model_from_pretrained(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        # test if the model can be loaded from the config
        # and has all the expected shape
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname, safe_serialization=False)
            new_model = self.model_class.from_pretrained(tmpdirname)
            new_model.to(torch_device)
            new_model.eval()

        # check if all parameters shape are the same
        for param_name in model.state_dict().keys():
            param_1 = model.state_dict()[param_name]
            param_2 = new_model.state_dict()[param_name]
            self.assertEqual(param_1.shape, param_2.shape)

        with torch.no_grad():
            output_1 = model(**inputs_dict)

            if isinstance(output_1, dict):
                output_1 = output_1.to_tuple()[0]
            if isinstance(output_1, list):
                output_1 = torch.stack(output_1)

            output_2 = new_model(**inputs_dict)

            if isinstance(output_2, dict):
                output_2 = output_2.to_tuple()[0]
            if isinstance(output_2, list):
                output_2 = torch.stack(output_2)

        self.assertEqual(output_1.shape, output_2.shape)

    @require_torch_accelerator_with_training
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, dict):
            output = output.to_tuple()[0]

        input_tensor = inputs_dict[self.main_input_name]
        noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()

    @require_torch_accelerator_with_training
    def test_ema_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.train()
        ema_model = EMAModel(model.parameters())

        output = model(**inputs_dict)

        if isinstance(output, dict):
            output = output.to_tuple()[0]

        input_tensor = inputs_dict[self.main_input_name]
        noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
        ema_model.step(model.parameters())

    def test_outputs_equivalence(self):
        def set_nan_tensor_to_zero(t):
            # Temporary fallback until `aten::_index_put_impl_` is implemented in mps
            # Track progress in https://github.com/pytorch/pytorch/issues/77764
            device = t.device
            if device.type == "mps":
                t = t.to("cpu")
            t[t != t] = 0
            return t.to(device)

        def recursive_check(tuple_object, dict_object):
            if isinstance(tuple_object, (List, Tuple)):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif isinstance(tuple_object, Dict):
                for tuple_iterable_value, dict_iterable_value in zip(tuple_object.values(), dict_object.values()):
                    recursive_check(tuple_iterable_value, dict_iterable_value)
            elif tuple_object is None:
                return
            else:
                self.assertTrue(
                    torch.allclose(
                        set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                    ),
                    msg=(
                        "Tuple and dict output are not equal. Difference:"
                        f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                        f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object).any()}. Dict has"
                        f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object).any()}."
                    ),
                )

        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)

        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            if self.forward_requires_fresh_args:
                outputs_dict = model(**self.inputs_dict(0))
                outputs_tuple = model(**self.inputs_dict(0), return_dict=False)
            else:
                outputs_dict = model(**inputs_dict)
                outputs_tuple = model(**inputs_dict, return_dict=False)

        recursive_check(outputs_tuple, outputs_dict)

    @require_torch_accelerator_with_training
    def test_enable_disable_gradient_checkpointing(self):
        # Skip test if model does not support gradient checkpointing
        if not self.model_class._supports_gradient_checkpointing:
            pytest.skip("Gradient checkpointing is not supported.")

        init_dict, _ = self.prepare_init_args_and_inputs_for_common()

        # at init model should have gradient checkpointing disabled
        model = self.model_class(**init_dict)
        self.assertFalse(model.is_gradient_checkpointing)

        # check enable works
        model.enable_gradient_checkpointing()
        self.assertTrue(model.is_gradient_checkpointing)

        # check disable works
        model.disable_gradient_checkpointing()
        self.assertFalse(model.is_gradient_checkpointing)

    @require_torch_accelerator_with_training
    def test_effective_gradient_checkpointing(self, loss_tolerance=1e-5, param_grad_tol=5e-5, skip: set[str] = {}):
        # Skip test if model does not support gradient checkpointing
        if not self.model_class._supports_gradient_checkpointing:
            pytest.skip("Gradient checkpointing is not supported.")

        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        inputs_dict_copy = copy.deepcopy(inputs_dict)
        torch.manual_seed(0)
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backward pass on the model; for simplicity we backprop on the mean
        # difference to random labels rather than on a proper training loss
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        torch.manual_seed(0)
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict_copy).sample
        # run the backward pass on the model; for simplicity we backprop on the mean
        # difference to random labels rather than on a proper training loss
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < loss_tolerance)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())

        for name, param in named_params.items():
            if "post_quant_conv" in name:
                continue
            if name in skip:
                continue
            # TODO(aryan): remove the below lines after looking into easyanimate transformer a little more
            # It currently errors out the gradient checkpointing test because the gradients for attn2.to_out is None
            if param.grad is None:
                continue
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=param_grad_tol))

    @unittest.skipIf(torch_device == "mps", "This test is not supported for MPS devices.")
    def test_gradient_checkpointing_is_applied(
        self, expected_set=None, attention_head_dim=None, num_attention_heads=None, block_out_channels=None
    ):
        # Skip test if model does not support gradient checkpointing
        if not self.model_class._supports_gradient_checkpointing:
            pytest.skip("Gradient checkpointing is not supported.")

        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        if attention_head_dim is not None:
            init_dict["attention_head_dim"] = attention_head_dim
        if num_attention_heads is not None:
            init_dict["num_attention_heads"] = num_attention_heads
        if block_out_channels is not None:
            init_dict["block_out_channels"] = block_out_channels

        model_class_copy = copy.copy(self.model_class)
        model = model_class_copy(**init_dict)
        model.enable_gradient_checkpointing()

        modules_with_gc_enabled = {}
        for submodule in model.modules():
            if hasattr(submodule, "gradient_checkpointing"):
                self.assertTrue(submodule.gradient_checkpointing)
                modules_with_gc_enabled[submodule.__class__.__name__] = True

        assert set(modules_with_gc_enabled.keys()) == expected_set
        assert all(modules_with_gc_enabled.values()), "All modules should be enabled"

    def test_deprecated_kwargs(self):
        has_kwarg_in_model_class = "kwargs" in inspect.signature(self.model_class.__init__).parameters
        has_deprecated_kwarg = len(self.model_class._deprecated_kwargs) > 0

        if has_kwarg_in_model_class and not has_deprecated_kwarg:
            raise ValueError(
                f"{self.model_class} has `**kwargs` in its __init__ method but has not defined any deprecated kwargs"
                " under the `_deprecated_kwargs` class attribute. Make sure to either remove `**kwargs` if there are"
                " no deprecated arguments or add the deprecated argument with `_deprecated_kwargs ="
                " [<deprecated_argument>]`"
            )

        if not has_kwarg_in_model_class and has_deprecated_kwarg:
            raise ValueError(
                f"{self.model_class} doesn't have `**kwargs` in its __init__ method but has defined deprecated kwargs"
                " under the `_deprecated_kwargs` class attribute. Make sure to either add the `**kwargs` argument to"
                f" {self.model_class}.__init__ if there are deprecated arguments or remove the deprecated argument"
                " from `_deprecated_kwargs = [<deprecated_argument>]`"
            )

    @parameterized.expand([(4, 4, True), (4, 8, False), (8, 4, False)])
    @torch.no_grad()
    @unittest.skipIf(not is_peft_available(), "Only with PEFT")
    def test_save_load_lora_adapter(self, rank, lora_alpha, use_dora=False):
        from peft import LoraConfig
        from peft.utils import get_peft_model_state_dict

        from diffusers.loaders.peft import PeftAdapterMixin

        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)

        if not issubclass(model.__class__, PeftAdapterMixin):
            pytest.skip(f"PEFT is not supported for this model ({model.__class__.__name__}).")

        torch.manual_seed(0)
        output_no_lora = model(**inputs_dict, return_dict=False)[0]
        if isinstance(output_no_lora, list):
            output_no_lora = torch.stack(output_no_lora)

        denoiser_lora_config = LoraConfig(
            r=rank,
            lora_alpha=lora_alpha,
            target_modules=["to_q", "to_k", "to_v", "to_out.0"],
            init_lora_weights=False,
            use_dora=use_dora,
        )
        model.add_adapter(denoiser_lora_config)
        self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

        torch.manual_seed(0)
        outputs_with_lora = model(**inputs_dict, return_dict=False)[0]
        if isinstance(outputs_with_lora, list):
            outputs_with_lora = torch.stack(outputs_with_lora)

        self.assertFalse(torch.allclose(output_no_lora, outputs_with_lora, atol=1e-4, rtol=1e-4))

        with tempfile.TemporaryDirectory() as tmpdir:
            model.save_lora_adapter(tmpdir)
            self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))

            state_dict_loaded = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))

            model.unload_lora()
            self.assertFalse(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

            model.load_lora_adapter(tmpdir, prefix=None, use_safetensors=True)
            state_dict_retrieved = get_peft_model_state_dict(model, adapter_name="default_0")

            for k in state_dict_loaded:
                loaded_v = state_dict_loaded[k]
                retrieved_v = state_dict_retrieved[k].to(loaded_v.device)
                self.assertTrue(torch.allclose(loaded_v, retrieved_v))

            self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

        torch.manual_seed(0)
        outputs_with_lora_2 = model(**inputs_dict, return_dict=False)[0]
        if isinstance(outputs_with_lora_2, list):
            outputs_with_lora_2 = torch.stack(outputs_with_lora_2)

        self.assertFalse(torch.allclose(output_no_lora, outputs_with_lora_2, atol=1e-4, rtol=1e-4))
        self.assertTrue(torch.allclose(outputs_with_lora, outputs_with_lora_2, atol=1e-4, rtol=1e-4))

    @unittest.skipIf(not is_peft_available(), "Only with PEFT")
    def test_lora_wrong_adapter_name_raises_error(self):
        from peft import LoraConfig

        from diffusers.loaders.peft import PeftAdapterMixin

        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)

        if not issubclass(model.__class__, PeftAdapterMixin):
            pytest.skip(f"PEFT is not supported for this model ({model.__class__.__name__}).")

        denoiser_lora_config = LoraConfig(
            r=4,
            lora_alpha=4,
            target_modules=["to_q", "to_k", "to_v", "to_out.0"],
            init_lora_weights=False,
            use_dora=False,
        )
        model.add_adapter(denoiser_lora_config)
        self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

        with tempfile.TemporaryDirectory() as tmpdir:
            wrong_name = "foo"
            with self.assertRaises(ValueError) as err_context:
                model.save_lora_adapter(tmpdir, adapter_name=wrong_name)

            self.assertTrue(f"Adapter name {wrong_name} not found in the model." in str(err_context.exception))

    @parameterized.expand([(4, 4, True), (4, 8, False), (8, 4, False)])
    @torch.no_grad()
    @unittest.skipIf(not is_peft_available(), "Only with PEFT")
    def test_lora_adapter_metadata_is_loaded_correctly(self, rank, lora_alpha, use_dora):
        from peft import LoraConfig

        from diffusers.loaders.peft import PeftAdapterMixin

        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)

        if not issubclass(model.__class__, PeftAdapterMixin):
            pytest.skip(f"PEFT is not supported for this model ({model.__class__.__name__}).")

        denoiser_lora_config = LoraConfig(
            r=rank,
            lora_alpha=lora_alpha,
            target_modules=["to_q", "to_k", "to_v", "to_out.0"],
            init_lora_weights=False,
            use_dora=use_dora,
        )
        model.add_adapter(denoiser_lora_config)
        metadata = model.peft_config["default"].to_dict()
        self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

        with tempfile.TemporaryDirectory() as tmpdir:
            model.save_lora_adapter(tmpdir)
            model_file = os.path.join(tmpdir, "pytorch_lora_weights.safetensors")
            self.assertTrue(os.path.isfile(model_file))

            model.unload_lora()
            self.assertFalse(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

            model.load_lora_adapter(tmpdir, prefix=None, use_safetensors=True)
            parsed_metadata = model.peft_config["default_0"].to_dict()
            check_if_dicts_are_equal(metadata, parsed_metadata)

    @torch.no_grad()
    @unittest.skipIf(not is_peft_available(), "Only with PEFT")
    def test_lora_adapter_wrong_metadata_raises_error(self):
        from peft import LoraConfig

        from diffusers.loaders.lora_base import LORA_ADAPTER_METADATA_KEY
        from diffusers.loaders.peft import PeftAdapterMixin

        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)

        if not issubclass(model.__class__, PeftAdapterMixin):
            pytest.skip(f"PEFT is not supported for this model ({model.__class__.__name__}).")

        denoiser_lora_config = LoraConfig(
            r=4,
            lora_alpha=4,
            target_modules=["to_q", "to_k", "to_v", "to_out.0"],
            init_lora_weights=False,
            use_dora=False,
        )
        model.add_adapter(denoiser_lora_config)
        self.assertTrue(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

        with tempfile.TemporaryDirectory() as tmpdir:
            model.save_lora_adapter(tmpdir)
            model_file = os.path.join(tmpdir, "pytorch_lora_weights.safetensors")
            self.assertTrue(os.path.isfile(model_file))

            # Perturb the metadata in the state dict.
            loaded_state_dict = safetensors.torch.load_file(model_file)
            metadata = {"format": "pt"}
            lora_adapter_metadata = denoiser_lora_config.to_dict()
            lora_adapter_metadata.update({"foo": 1, "bar": 2})
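            # JSON can't serialize Python sets, so convert any set-valued config fields to lists before dumping.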
            for key, value in lora_adapter_metadata.items():
                if isinstance(value, set):
                    lora_adapter_metadata[key] = list(value)
            metadata[LORA_ADAPTER_METADATA_KEY] = json.dumps(lora_adapter_metadata, indent=2, sort_keys=True)
            safetensors.torch.save_file(loaded_state_dict, model_file, metadata=metadata)

            model.unload_lora()
            self.assertFalse(check_if_lora_correctly_set(model), "LoRA layers not set correctly")

            with self.assertRaises(TypeError) as err_context:
                model.load_lora_adapter(tmpdir, prefix=None, use_safetensors=True)
            self.assertTrue("`LoraConfig` class could not be instantiated" in str(err_context.exception))

    @require_torch_accelerator
    def test_cpu_offload(self):
        if self.model_class._no_split_modules is None:
            pytest.skip("Test not supported for this model as `_no_split_modules` is not set.")

        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()
        model = model.to(torch_device)

        torch.manual_seed(0)
        base_output = model(**inputs_dict)
        base_normalized_output = normalize_output(base_output)

        model_size = compute_module_sizes(model)[""]
        max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]]

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir)

            for max_size in max_gpu_sizes:
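                # Cap the accelerator's memory so that `device_map="auto"` has to place part of the model on the CPU.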
                max_memory = {0: max_size, "cpu": model_size * 2}
                new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory)

                # Making sure part of the model will actually end up offloaded
                self.assertSetEqual(set(new_model.hf_device_map.values()), {0, "cpu"})

                self.check_device_map_is_respected(new_model, new_model.hf_device_map)

                torch.manual_seed(0)
                new_output = new_model(**inputs_dict)
                new_normalized_output = normalize_output(new_output)

                self.assertTrue(torch.allclose(base_normalized_output, new_normalized_output, atol=1e-5))

    @require_torch_accelerator
    def test_disk_offload_without_safetensors(self):
        if self.model_class._no_split_modules is None:
            pytest.skip("Test not supported for this model as `_no_split_modules` is not set.")
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()

        model = model.to(torch_device)

        torch.manual_seed(0)
        base_output = model(**inputs_dict)
        base_normalized_output = normalize_output(base_output)

        model_size = compute_module_sizes(model)[""]
        max_size = int(self.model_split_percents[0] * model_size)
        # Force disk offload by setting very small CPU memory
        max_memory = {0: max_size, "cpu": int(0.1 * max_size)}

        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir, safe_serialization=False)
            with self.assertRaises(ValueError):
                # This errors out because it's missing an offload folder
                new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory)

            new_model = self.model_class.from_pretrained(
                tmp_dir, device_map="auto", max_memory=max_memory, offload_folder=tmp_dir
            )

            self.check_device_map_is_respected(new_model, new_model.hf_device_map)
            torch.manual_seed(0)
            new_output = new_model(**inputs_dict)
            new_normalized_output = normalize_output(new_output)
            self.assertTrue(torch.allclose(base_normalized_output, new_normalized_output, atol=1e-5))

    @require_torch_accelerator
    def test_disk_offload_with_safetensors(self):
        if self.model_class._no_split_modules is None:
            pytest.skip("Test not supported for this model as `_no_split_modules` is not set.")
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()

        model = model.to(torch_device)

        torch.manual_seed(0)
        base_output = model(**inputs_dict)
        base_normalized_output = normalize_output(base_output)

        model_size = compute_module_sizes(model)[""]
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir)

            max_size = int(self.model_split_percents[0] * model_size)
            max_memory = {0: max_size, "cpu": max_size}
            new_model = self.model_class.from_pretrained(
                tmp_dir, device_map="auto", offload_folder=tmp_dir, max_memory=max_memory
            )

            self.check_device_map_is_respected(new_model, new_model.hf_device_map)
            torch.manual_seed(0)
            new_output = new_model(**inputs_dict)
            new_normalized_output = normalize_output(new_output)

            self.assertTrue(torch.allclose(base_normalized_output, new_normalized_output, atol=1e-5))

    @require_torch_multi_accelerator
    def test_model_parallelism(self):
        if self.model_class._no_split_modules is None:
            pytest.skip("Test not supported for this model as `_no_split_modules` is not set.")
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()

        model = model.to(torch_device)

        torch.manual_seed(0)
        base_output = model(**inputs_dict)

        model_size = compute_module_sizes(model)[""]
        # We test several splits of sizes to make sure it works.
        max_gpu_sizes = [int(p * model_size) for p in self.model_split_percents[1:]]
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir)

            for max_size in max_gpu_sizes:
                max_memory = {0: max_size, 1: model_size * 2, "cpu": model_size * 2}
                new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto", max_memory=max_memory)
                # Making sure part of the model will actually end up offloaded
                self.assertSetEqual(set(new_model.hf_device_map.values()), {0, 1})

                self.check_device_map_is_respected(new_model, new_model.hf_device_map)

                torch.manual_seed(0)
                new_output = new_model(**inputs_dict)

                self.assertTrue(torch.allclose(base_output[0], new_output[0], atol=1e-5))

    @require_torch_accelerator
    def test_sharded_checkpoints(self):
        torch.manual_seed(0)
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()
        model = model.to(torch_device)

        base_output = model(**inputs_dict)
        base_normalized_output = normalize_output(base_output)

        model_size = compute_module_persistent_sizes(model)[""]
        max_shard_size = int((model_size * 0.75) / (2**10))  # Convert to KB as these test models are small.
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB")
            self.assertTrue(os.path.exists(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)))

            # Now check if the right number of shards exists. First, let's get the number of shards.
            # Since this number can be dependent on the model being tested, it's important that we calculate it
            # instead of hardcoding it.
            expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))
            actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")])
            self.assertTrue(actual_num_shards == expected_num_shards)

            new_model = self.model_class.from_pretrained(tmp_dir).eval()
            new_model = new_model.to(torch_device)

            torch.manual_seed(0)
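            # If the inputs carry a generator, rebuild them so the reloaded model sees a fresh, identically seeded one.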
            if "generator" in inputs_dict:
                _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            new_output = new_model(**inputs_dict)
            new_normalized_output = normalize_output(new_output)

            self.assertTrue(torch.allclose(base_normalized_output, new_normalized_output, atol=1e-5))

    @require_torch_accelerator
    def test_sharded_checkpoints_with_variant(self):
        torch.manual_seed(0)
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()
        model = model.to(torch_device)

        base_output = model(**inputs_dict)
        base_normalized_output = normalize_output(base_output)

        model_size = compute_module_persistent_sizes(model)[""]
        max_shard_size = int((model_size * 0.75) / (2**10))  # Convert to KB as these test models are small.
        variant = "fp16"
        with tempfile.TemporaryDirectory() as tmp_dir:
            # The model's actual dtype doesn't matter here. Saving with the variant and checking that the
            # sharded checkpoint loads correctly with that variant is enough.
            model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB", variant=variant)

            index_filename = _add_variant(SAFE_WEIGHTS_INDEX_NAME, variant)
            self.assertTrue(os.path.exists(os.path.join(tmp_dir, index_filename)))

            # Now check if the right number of shards exists. First, let's get the number of shards.
            # Since this number can be dependent on the model being tested, it's important that we calculate it
            # instead of hardcoding it.
            expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, index_filename))
            actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")])
            self.assertTrue(actual_num_shards == expected_num_shards)

            new_model = self.model_class.from_pretrained(tmp_dir, variant=variant).eval()
            new_model = new_model.to(torch_device)

            torch.manual_seed(0)
            if "generator" in inputs_dict:
                _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            new_output = new_model(**inputs_dict)
            new_normalized_output = normalize_output(new_output)

            self.assertTrue(torch.allclose(base_normalized_output, new_normalized_output, atol=1e-5))

    @require_torch_accelerator
    def test_sharded_checkpoints_with_parallel_loading(self):
        torch.manual_seed(0)
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()
        model = model.to(torch_device)

        base_output = model(**inputs_dict)
        base_normalized_output = normalize_output(base_output)

        model_size = compute_module_persistent_sizes(model)[""]
        max_shard_size = int((model_size * 0.75) / (2**10))  # Convert to KB as these test models are small.
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB")
            self.assertTrue(os.path.exists(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)))

            # Now check if the right number of shards exists. First, let's get the number of shards.
            # Since this number can be dependent on the model being tested, it's important that we calculate it
            # instead of hardcoding it.
            expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))
            actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")])
            self.assertTrue(actual_num_shards == expected_num_shards)

            # Load with parallel loading
            os.environ["HF_ENABLE_PARALLEL_LOADING"] = "yes"
            new_model = self.model_class.from_pretrained(tmp_dir).eval()
            new_model = new_model.to(torch_device)

            torch.manual_seed(0)
            if "generator" in inputs_dict:
                _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            new_output = new_model(**inputs_dict)
            new_normalized_output = normalize_output(new_output)

            self.assertTrue(torch.allclose(base_normalized_output, new_normalized_output, atol=1e-5))
            # Reset the env var so parallel loading stays disabled for subsequent tests.
            os.environ["HF_ENABLE_PARALLEL_LOADING"] = "no"

    @require_torch_accelerator
    def test_sharded_checkpoints_device_map(self):
        if self.model_class._no_split_modules is None:
            pytest.skip("Test not supported for this model as `_no_split_modules` is not set.")
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config).eval()
        model = model.to(torch_device)

        torch.manual_seed(0)
        base_output = model(**inputs_dict)
        base_normalized_output = normalize_output(base_output)

        model_size = compute_module_persistent_sizes(model)[""]
        max_shard_size = int((model_size * 0.75) / (2**10))  # Convert to KB as these test models are small.
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.cpu().save_pretrained(tmp_dir, max_shard_size=f"{max_shard_size}KB")
            self.assertTrue(os.path.exists(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)))

            # Now check if the right number of shards exists. First, let's get the number of shards.
            # Since this number can be dependent on the model being tested, it's important that we calculate it
            # instead of hardcoding it.
            expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME))
            actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(".safetensors")])
            self.assertTrue(actual_num_shards == expected_num_shards)

            new_model = self.model_class.from_pretrained(tmp_dir, device_map="auto")

            torch.manual_seed(0)
            if "generator" in inputs_dict:
                _, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            new_output = new_model(**inputs_dict)
            new_normalized_output = normalize_output(new_output)

            self.assertTrue(torch.allclose(base_normalized_output, new_normalized_output, atol=1e-5))

    # This test is okay without a GPU because we're not running any execution. We're just serializing
    # and checking that the resulting files follow the expected format.
    def test_variant_sharded_ckpt_right_format(self):
        for use_safe in [True, False]:
            extension = ".safetensors" if use_safe else ".bin"
            config, _ = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**config).eval()

            model_size = compute_module_persistent_sizes(model)[""]
            max_shard_size = int((model_size * 0.75) / (2**10))  # Convert to KB as these test models are small.
            variant = "fp16"
            with tempfile.TemporaryDirectory() as tmp_dir:
                model.cpu().save_pretrained(
                    tmp_dir, variant=variant, max_shard_size=f"{max_shard_size}KB", safe_serialization=use_safe
                )
                index_variant = _add_variant(SAFE_WEIGHTS_INDEX_NAME if use_safe else WEIGHTS_INDEX_NAME, variant)
                self.assertTrue(os.path.exists(os.path.join(tmp_dir, index_variant)))

                # Now check if the right number of shards exists. First, let's get the number of shards.
                # Since this number can be dependent on the model being tested, it's important that we calculate it
                # instead of hardcoding it.
                expected_num_shards = caculate_expected_num_shards(os.path.join(tmp_dir, index_variant))
                actual_num_shards = len([file for file in os.listdir(tmp_dir) if file.endswith(extension)])
                self.assertTrue(actual_num_shards == expected_num_shards)

                # Check if the variant is present as a substring in the checkpoints.
                shard_files = [
                    file
                    for file in os.listdir(tmp_dir)
                    if file.endswith(extension) or ("index" in file and "json" in file)
                ]
                assert all(variant in f for f in shard_files)

                # Check if the sharded checkpoints were serialized in the right format.
                shard_files = [file for file in os.listdir(tmp_dir) if file.endswith(extension)]
                # Example: diffusion_pytorch_model.fp16-00001-of-00002.safetensors
                assert all(f.split(".")[1].split("-")[0] == variant for f in shard_files)

    def test_layerwise_casting_training(self):
        def test_fn(storage_dtype, compute_dtype):
            if torch.device(torch_device).type == "cpu" and compute_dtype == torch.bfloat16:
                pytest.skip("Skipping test because CPU doesn't go well with bfloat16.")
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

            model = self.model_class(**init_dict)
            model = model.to(torch_device, dtype=compute_dtype)
            model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype)
            model.train()

            inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype)
            with torch.amp.autocast(device_type=torch.device(torch_device).type):
                output = model(**inputs_dict)

                if isinstance(output, dict):
                    output = output.to_tuple()[0]

                input_tensor = inputs_dict[self.main_input_name]
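                # Random noise with the model's output shape serves as a dummy regression target for the loss.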
                noise = torch.randn((input_tensor.shape[0],) + self.output_shape).to(torch_device)
                noise = cast_maybe_tensor_dtype(noise, torch.float32, compute_dtype)
                loss = torch.nn.functional.mse_loss(output, noise)

            loss.backward()

        test_fn(torch.float16, torch.float32)
        test_fn(torch.float8_e4m3fn, torch.float32)
        test_fn(torch.float8_e5m2, torch.float32)
        test_fn(torch.float8_e4m3fn, torch.bfloat16)

    @torch.no_grad()
    def test_layerwise_casting_inference(self):
        from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS
        from diffusers.hooks.layerwise_casting import DEFAULT_SKIP_MODULES_PATTERN

        torch.manual_seed(0)
        config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**config)
        model.eval()
        model.to(torch_device)
        base_slice = model(**inputs_dict)[0]
        base_slice = normalize_output(base_slice)
        base_slice = base_slice.detach().flatten().cpu().numpy()

        def check_linear_dtype(module, storage_dtype, compute_dtype):
            patterns_to_check = DEFAULT_SKIP_MODULES_PATTERN
            if getattr(module, "_skip_layerwise_casting_patterns", None) is not None:
                patterns_to_check += tuple(module._skip_layerwise_casting_patterns)
            for name, submodule in module.named_modules():
                if not isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS):
                    continue
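                # Modules matching a skip pattern are expected to keep the compute dtype; everything else should be
                # stored in the storage dtype.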
                dtype_to_check = storage_dtype
                if any(re.search(pattern, name) for pattern in patterns_to_check):
                    dtype_to_check = compute_dtype
                if getattr(submodule, "weight", None) is not None:
                    self.assertEqual(submodule.weight.dtype, dtype_to_check)
                if getattr(submodule, "bias", None) is not None:
                    self.assertEqual(submodule.bias.dtype, dtype_to_check)

        def test_layerwise_casting(storage_dtype, compute_dtype):
            torch.manual_seed(0)
            config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype)
            model = self.model_class(**config).eval()
            model = model.to(torch_device, dtype=compute_dtype)
            model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype)

            check_linear_dtype(model, storage_dtype, compute_dtype)
            output = model(**inputs_dict)[0]
            output = normalize_output(output)
            output = output.float().flatten().detach().cpu().numpy()

            # The precision test is not very important for fast tests. In most cases, the outputs will not be the same.
            # We just want to make sure that the layerwise casting is working as expected.
            self.assertTrue(numpy_cosine_similarity_distance(base_slice, output) < 1.0)

        test_layerwise_casting(torch.float16, torch.float32)
        test_layerwise_casting(torch.float8_e4m3fn, torch.float32)
        test_layerwise_casting(torch.float8_e5m2, torch.float32)
        test_layerwise_casting(torch.float8_e4m3fn, torch.bfloat16)

    @require_torch_accelerator
    @torch.no_grad()
    def test_layerwise_casting_memory(self):
        MB_TOLERANCE = 0.2
        LEAST_COMPUTE_CAPABILITY = 8.0

        def reset_memory_stats():
            gc.collect()
            backend_synchronize(torch_device)
            backend_empty_cache(torch_device)
            backend_reset_peak_memory_stats(torch_device)

        def get_memory_usage(storage_dtype, compute_dtype):
            torch.manual_seed(0)
            config, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype)
            model = self.model_class(**config).eval()
            model = model.to(torch_device, dtype=compute_dtype)
            model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype)

            reset_memory_stats()
            model(**inputs_dict)
            model_memory_footprint = model.get_memory_footprint()
            peak_inference_memory_allocated_mb = backend_max_memory_allocated(torch_device) / 1024**2

            return model_memory_footprint, peak_inference_memory_allocated_mb

        fp32_memory_footprint, fp32_max_memory = get_memory_usage(torch.float32, torch.float32)
        fp8_e4m3_fp32_memory_footprint, fp8_e4m3_fp32_max_memory = get_memory_usage(torch.float8_e4m3fn, torch.float32)
        fp8_e4m3_bf16_memory_footprint, fp8_e4m3_bf16_max_memory = get_memory_usage(
            torch.float8_e4m3fn, torch.bfloat16
        )

        compute_capability = get_torch_cuda_device_capability() if torch_device == "cuda" else None
        self.assertTrue(fp8_e4m3_bf16_memory_footprint < fp8_e4m3_fp32_memory_footprint < fp32_memory_footprint)
        # NOTE: the following assertion would fail on our CI (running Tesla T4) due to bf16 using more memory than fp32.
        # On other devices, such as DGX (Ampere) and Audace (Ada), the test passes. So, we conditionally check it.
        if compute_capability and compute_capability >= LEAST_COMPUTE_CAPABILITY:
            self.assertTrue(fp8_e4m3_bf16_max_memory < fp8_e4m3_fp32_max_memory)
        # On this dummy test case with a small model, sometimes fp8_e4m3_fp32 max memory usage is higher than fp32 by a few
        # bytes. This only happens for some models, so we allow a small tolerance.
        # For any real model being tested, the order would be fp8_e4m3_bf16 < fp8_e4m3_fp32 < fp32.
        self.assertTrue(
            fp8_e4m3_fp32_max_memory < fp32_max_memory
            or abs(fp8_e4m3_fp32_max_memory - fp32_max_memory) < MB_TOLERANCE
        )

    @parameterized.expand([False, True])
    @require_torch_accelerator
    def test_group_offloading(self, record_stream):
        for cls in inspect.getmro(self.__class__):
            if "test_group_offloading" in cls.__dict__ and cls is not ModelTesterMixin:
                # Skip this test if it is overwritten by child class. We need to do this because parameterized
                # materializes the test methods on invocation which cannot be overridden.
                pytest.skip("Model does not support group offloading.")

        if not self.model_class._supports_group_offloading:
            pytest.skip("Model does not support group offloading.")

        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        torch.manual_seed(0)

        @torch.no_grad()
        def run_forward(model):
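            # Every hooked module should expose a "group_offloading" hook before the forward pass runs.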
            self.assertTrue(
                all(
                    module._diffusers_hook.get_hook("group_offloading") is not None
                    for module in model.modules()
                    if hasattr(module, "_diffusers_hook")
                )
            )
            model.eval()
            return model(**inputs_dict)[0]

        model = self.model_class(**init_dict)

        model.to(torch_device)
        output_without_group_offloading = run_forward(model)
        output_without_group_offloading = normalize_output(output_without_group_offloading)

        torch.manual_seed(0)
        model = self.model_class(**init_dict)
        model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1)
        output_with_group_offloading1 = run_forward(model)
        output_with_group_offloading1 = normalize_output(output_with_group_offloading1)

        torch.manual_seed(0)
        model = self.model_class(**init_dict)
        model.enable_group_offload(torch_device, offload_type="block_level", num_blocks_per_group=1, non_blocking=True)
        output_with_group_offloading2 = run_forward(model)
        output_with_group_offloading2 = normalize_output(output_with_group_offloading2)

        torch.manual_seed(0)
        model = self.model_class(**init_dict)
        model.enable_group_offload(torch_device, offload_type="leaf_level")
        output_with_group_offloading3 = run_forward(model)
        output_with_group_offloading3 = normalize_output(output_with_group_offloading3)

        torch.manual_seed(0)
        model = self.model_class(**init_dict)
        model.enable_group_offload(
            torch_device, offload_type="leaf_level", use_stream=True, record_stream=record_stream
        )
        output_with_group_offloading4 = run_forward(model)
        output_with_group_offloading4 = normalize_output(output_with_group_offloading4)

        self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading1, atol=1e-5))
        self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading2, atol=1e-5))
        self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading3, atol=1e-5))
        self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading4, atol=1e-5))

    @parameterized.expand([(False, "block_level"), (True, "leaf_level")])
    @require_torch_accelerator
    @torch.no_grad()
    def test_group_offloading_with_layerwise_casting(self, record_stream, offload_type):
        if not self.model_class._supports_group_offloading:
            pytest.skip("Model does not support group offloading.")

        torch.manual_seed(0)
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)

        model.to(torch_device)
        model.eval()
        _ = model(**inputs_dict)[0]

        torch.manual_seed(0)
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        storage_dtype, compute_dtype = torch.float16, torch.float32
        inputs_dict = cast_maybe_tensor_dtype(inputs_dict, torch.float32, compute_dtype)
        model = self.model_class(**init_dict)
        model.eval()
        additional_kwargs = {} if offload_type == "leaf_level" else {"num_blocks_per_group": 1}
        model.enable_group_offload(
            torch_device, offload_type=offload_type, use_stream=True, record_stream=record_stream, **additional_kwargs
        )
        model.enable_layerwise_casting(storage_dtype=storage_dtype, compute_dtype=compute_dtype)
        _ = model(**inputs_dict)[0]

    @parameterized.expand([("block_level", False), ("leaf_level", True)])
    @require_torch_accelerator
    @torch.no_grad()
    @torch.inference_mode()
    def test_group_offloading_with_disk(self, offload_type, record_stream, atol=1e-5):
        for cls in inspect.getmro(self.__class__):
            if "test_group_offloading_with_disk" in cls.__dict__ and cls is not ModelTesterMixin:
                # Skip this test if it is overwritten by child class. We need to do this because parameterized
                # materializes the test methods on invocation which cannot be overridden.
                pytest.skip("Model does not support group offloading with disk yet.")

        if not self.model_class._supports_group_offloading:
            pytest.skip("Model does not support group offloading.")

        def _has_generator_arg(model):
            sig = inspect.signature(model.forward)
            params = sig.parameters
            return "generator" in params

        def _run_forward(model, inputs_dict):
            accepts_generator = _has_generator_arg(model)
            if accepts_generator:
                inputs_dict["generator"] = torch.manual_seed(0)
            torch.manual_seed(0)
            return model(**inputs_dict)[0]

        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        torch.manual_seed(0)
        model = self.model_class(**init_dict)

        model.eval()
        model.to(torch_device)
        output_without_group_offloading = _run_forward(model, inputs_dict)
        output_without_group_offloading = normalize_output(output_without_group_offloading)

        torch.manual_seed(0)
        model = self.model_class(**init_dict)
        model.eval()

        num_blocks_per_group = None if offload_type == "leaf_level" else 1
        additional_kwargs = {} if offload_type == "leaf_level" else {"num_blocks_per_group": num_blocks_per_group}
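        # `num_blocks_per_group` only applies to "block_level" offloading; "leaf_level" offloads each leaf module individually.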
        with tempfile.TemporaryDirectory() as tmpdir:
            model.enable_group_offload(
                torch_device,
                offload_type=offload_type,
                offload_to_disk_path=tmpdir,
                use_stream=True,
                record_stream=record_stream,
                **additional_kwargs,
            )
            has_safetensors = glob.glob(f"{tmpdir}/*.safetensors")
            self.assertTrue(has_safetensors, "No safetensors found in the directory.")

            # For "leaf_level" offloading, a prefetching hook makes this check non-deterministic, so skip it.
            if offload_type != "leaf_level":
                is_correct, extra_files, missing_files = _check_safetensors_serialization(
                    module=model,
                    offload_to_disk_path=tmpdir,
                    offload_type=offload_type,
                    num_blocks_per_group=num_blocks_per_group,
                )
                if not is_correct:
                    if extra_files:
                        raise ValueError(f"Found extra files: {', '.join(extra_files)}")
                    elif missing_files:
                        raise ValueError(f"Following files are missing: {', '.join(missing_files)}")

            output_with_group_offloading = _run_forward(model, inputs_dict)
            output_with_group_offloading = normalize_output(output_with_group_offloading)
            self.assertTrue(torch.allclose(output_without_group_offloading, output_with_group_offloading, atol=atol))

    def test_auto_model(self, expected_max_diff=5e-5):
        if self.forward_requires_fresh_args:
            model = self.model_class(**self.init_dict)
        else:
            init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict)

        model = model.eval()
        model = model.to(torch_device)

        if hasattr(model, "set_default_attn_processor"):
            model.set_default_attn_processor()

        with tempfile.TemporaryDirectory(ignore_cleanup_errors=True) as tmpdirname:
            model.save_pretrained(tmpdirname, safe_serialization=False)
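            # AutoModel should resolve the concrete model class from the saved config and load the same weights.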

            auto_model = AutoModel.from_pretrained(tmpdirname)
            if hasattr(auto_model, "set_default_attn_processor"):
                auto_model.set_default_attn_processor()

        auto_model = auto_model.eval()
        auto_model = auto_model.to(torch_device)

        with torch.no_grad():
            if self.forward_requires_fresh_args:
                output_original = model(**self.inputs_dict(0))
                output_auto = auto_model(**self.inputs_dict(0))
            else:
                output_original = model(**inputs_dict)
                output_auto = auto_model(**inputs_dict)

        if isinstance(output_original, dict):
            output_original = output_original.to_tuple()[0]
        if isinstance(output_auto, dict):
            output_auto = output_auto.to_tuple()[0]

        if isinstance(output_original, list):
            output_original = torch.stack(output_original)
        if isinstance(output_auto, list):
            output_auto = torch.stack(output_auto)

        output_original, output_auto = output_original.float(), output_auto.float()

        max_diff = (output_original - output_auto).abs().max().item()
        self.assertLessEqual(
            max_diff,
            expected_max_diff,
            f"AutoModel forward pass diff: {max_diff} exceeds threshold {expected_max_diff}",
        )

    @parameterized.expand(
        [
            (-1, "You can't pass device_map as a negative int"),
            ("foo", "When passing device_map as a string, the value needs to be a device name"),
        ]
    )
    def test_wrong_device_map_raises_error(self, device_map, msg_substring):
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        with tempfile.TemporaryDirectory() as tmpdir:
            model.save_pretrained(tmpdir)
            with self.assertRaises(ValueError) as err_ctx:
                _ = self.model_class.from_pretrained(tmpdir, device_map=device_map)

        assert msg_substring in str(err_ctx.exception)

    @parameterized.expand([0, torch_device, torch.device(torch_device)])
    @require_torch_accelerator
    def test_passing_non_dict_device_map_works(self, device_map):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).eval()
        with tempfile.TemporaryDirectory() as tmpdir:
            model.save_pretrained(tmpdir)
            loaded_model = self.model_class.from_pretrained(tmpdir, device_map=device_map)
            _ = loaded_model(**inputs_dict)

    @parameterized.expand([("", torch_device), ("", torch.device(torch_device))])
    @require_torch_accelerator
    def test_passing_dict_device_map_works(self, name, device):
        # There are other valid dict-based `device_map` values too. It's best to refer to
        # the docs for those: https://huggingface.co/docs/accelerate/en/concept_guides/big_model_inference#the-devicemap.
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).eval()
        device_map = {name: device}
        with tempfile.TemporaryDirectory() as tmpdir:
            model.save_pretrained(tmpdir)
            loaded_model = self.model_class.from_pretrained(tmpdir, device_map=device_map)
            _ = loaded_model(**inputs_dict)


@is_staging_test
class ModelPushToHubTester(unittest.TestCase):
    identifier = uuid.uuid4()
    repo_id = f"test-model-{identifier}"
    org_repo_id = f"valid_org/{repo_id}-org"

    def test_push_to_hub(self):
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        model.push_to_hub(self.repo_id, token=TOKEN)

        new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}")
        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            self.assertTrue(torch.equal(p1, p2))

        # Reset repo
        delete_repo(token=TOKEN, repo_id=self.repo_id)

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, repo_id=self.repo_id, push_to_hub=True, token=TOKEN)

        new_model = UNet2DConditionModel.from_pretrained(f"{USER}/{self.repo_id}")
        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            self.assertTrue(torch.equal(p1, p2))

        # Reset repo
        delete_repo(self.repo_id, token=TOKEN)

    def test_push_to_hub_in_organization(self):
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        model.push_to_hub(self.org_repo_id, token=TOKEN)

        new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id)
        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            self.assertTrue(torch.equal(p1, p2))

        # Reset repo
        delete_repo(token=TOKEN, repo_id=self.org_repo_id)

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            model.save_pretrained(tmp_dir, push_to_hub=True, token=TOKEN, repo_id=self.org_repo_id)

        new_model = UNet2DConditionModel.from_pretrained(self.org_repo_id)
        for p1, p2 in zip(model.parameters(), new_model.parameters()):
            self.assertTrue(torch.equal(p1, p2))

        # Reset repo
        delete_repo(self.org_repo_id, token=TOKEN)

    @unittest.skipIf(
        not is_jinja_available(),
        reason="Model card tests cannot be performed without Jinja installed.",
    )
    def test_push_to_hub_library_name(self):
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        model.push_to_hub(self.repo_id, token=TOKEN)

        model_card = ModelCard.load(f"{USER}/{self.repo_id}", token=TOKEN).data
        assert model_card.library_name == "diffusers"

        # Reset repo
        delete_repo(self.repo_id, token=TOKEN)


@require_torch_accelerator
@require_torch_2
@is_torch_compile
@slow
@require_torch_version_greater("2.7.1")
class TorchCompileTesterMixin:
    different_shapes_for_compilation = None

    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        torch.compiler.reset()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        # clean up the VRAM after each test in case of CUDA runtime errors
        super().tearDown()
        torch.compiler.reset()
        gc.collect()
        backend_empty_cache(torch_device)

    def test_torch_compile_recompilation_and_graph_break(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict).to(torch_device)
        model.eval()
        model = torch.compile(model, fullgraph=True)

        with (
            torch._inductor.utils.fresh_inductor_cache(),
            torch._dynamo.config.patch(error_on_recompile=True),
            torch.no_grad(),
        ):
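            # Two consecutive calls with identical inputs must reuse the compiled graph; any recompile raises under this config.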
            _ = model(**inputs_dict)
            _ = model(**inputs_dict)

    def test_torch_compile_repeated_blocks(self):
        if self.model_class._repeated_blocks is None:
            pytest.skip("Skipping test as the model class doesn't have `_repeated_blocks` set.")

        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict).to(torch_device)
        model.eval()
        model.compile_repeated_blocks(fullgraph=True)

        recompile_limit = 1
        if self.model_class.__name__ == "UNet2DConditionModel":
            recompile_limit = 2
        elif self.model_class.__name__ == "ZImageTransformer2DModel":
            recompile_limit = 3

        with (
            torch._inductor.utils.fresh_inductor_cache(),
            torch._dynamo.config.patch(recompile_limit=recompile_limit),
            torch.no_grad(),
        ):
            _ = model(**inputs_dict)
            _ = model(**inputs_dict)

    def test_compile_with_group_offloading(self):
        if not self.model_class._supports_group_offloading:
            pytest.skip("Model does not support group offloading.")

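        # A generous dynamo cache size limit keeps the graph variations introduced by the offloading hooks from hitting the default cap.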
        torch._dynamo.config.cache_size_limit = 10000

        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.eval()
        # TODO: Can test for other group offloading kwargs later if needed.
        group_offload_kwargs = {
            "onload_device": torch_device,
            "offload_device": "cpu",
            "offload_type": "block_level",
            "num_blocks_per_group": 1,
            "use_stream": True,
            "non_blocking": True,
        }
        model.enable_group_offload(**group_offload_kwargs)
        model.compile()

        with torch.no_grad():
            _ = model(**inputs_dict)
            _ = model(**inputs_dict)

    def test_compile_on_different_shapes(self):
        if self.different_shapes_for_compilation is None:
            pytest.skip(f"Skipping as `different_shapes_for_compilation` is not set for {self.__class__.__name__}.")
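        # Duck shaping would let dynamic dims that start with the same value share one symbol; disable it so the
        # heights and widths below are traced as independent dynamic dims.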
        torch.fx.experimental._config.use_duck_shape = False

        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)
        model.eval()
        model = torch.compile(model, fullgraph=True, dynamic=True)

        for height, width in self.different_shapes_for_compilation:
            with torch._dynamo.config.patch(error_on_recompile=True), torch.no_grad():
                inputs_dict = self.prepare_dummy_input(height=height, width=width)
                _ = model(**inputs_dict)

    def test_compile_works_with_aot(self):
        from torch._inductor.package import load_package

        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**init_dict).to(torch_device)
        exported_model = torch.export.export(model, args=(), kwargs=inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdir:
            package_path = os.path.join(tmpdir, f"{self.model_class.__name__}.pt2")
            _ = torch._inductor.aoti_compile_and_package(exported_model, package_path=package_path)
            assert os.path.exists(package_path)
            loaded_binary = load_package(package_path, run_single_threaded=True)

        model.forward = loaded_binary

        with torch.no_grad():
            _ = model(**inputs_dict)
            _ = model(**inputs_dict)


@slow
@require_torch_2
@require_torch_accelerator
@require_peft_backend
@require_peft_version_greater("0.14.0")
@require_torch_version_greater("2.7.1")
@is_torch_compile
class LoraHotSwappingForModelTesterMixin:
    """Test that hotswapping does not result in recompilation on the model directly.

    We're not extensively testing the hotswapping functionality since it is implemented in PEFT and is extensively
    tested there. The goal of this test is specifically to ensure that hotswapping with diffusers does not require
    recompilation.

    See
    https://github.com/huggingface/peft/blob/eaab05e18d51fb4cce20a73c9acd82a00c013b83/tests/test_gpu_examples.py#L4252
    for the analogous PEFT test.

    """

    different_shapes_for_compilation = None

    def tearDown(self):
        # It is critical that the dynamo cache is reset for each test. Otherwise, if the test re-uses the same model,
        # there will be recompilation errors, as torch caches the model when run in the same process.
        super().tearDown()
        torch.compiler.reset()
        gc.collect()
        backend_empty_cache(torch_device)

    def get_lora_config(self, lora_rank, lora_alpha, target_modules):
        from peft import LoraConfig

        lora_config = LoraConfig(
            r=lora_rank,
            lora_alpha=lora_alpha,
            target_modules=target_modules,
            init_lora_weights=False,
            use_dora=False,
        )
        return lora_config

    def get_linear_module_name_other_than_attn(self, model):
        linear_names = [
            name for name, module in model.named_modules() if isinstance(module, nn.Linear) and "to_" not in name
        ]
        return linear_names[0]

    def check_model_hotswap(self, do_compile, rank0, rank1, target_modules0, target_modules1=None):
        """
        Check that hotswapping works on a small model.

        Steps:
        - create 2 LoRA adapters and save them
        - load the first adapter
        - hotswap the second adapter
        - check that the outputs are correct
        - optionally compile the model
        - optionally check if recompilations happen on different shapes

        Note: We set rank == alpha here because save_lora_adapter does not save the alpha scalings, thus the test would
        fail if the values are different. Since rank != alpha does not matter for the purpose of this test, this is
        fine.
        """
        different_shapes = self.different_shapes_for_compilation
        # create 2 adapters with different ranks and alphas
        torch.manual_seed(0)
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)

        alpha0, alpha1 = rank0, rank1
        max_rank = max([rank0, rank1])
        if target_modules1 is None:
            target_modules1 = target_modules0[:]
        lora_config0 = self.get_lora_config(rank0, alpha0, target_modules0)
        lora_config1 = self.get_lora_config(rank1, alpha1, target_modules1)

        model.add_adapter(lora_config0, adapter_name="adapter0")
        with torch.inference_mode():
            torch.manual_seed(0)
            output0_before = model(**inputs_dict)["sample"]

        model.add_adapter(lora_config1, adapter_name="adapter1")
        model.set_adapter("adapter1")
        with torch.inference_mode():
            torch.manual_seed(0)
            output1_before = model(**inputs_dict)["sample"]

        # sanity checks:
        tol = 5e-3
        assert not torch.allclose(output0_before, output1_before, atol=tol, rtol=tol)
        assert not (output0_before == 0).all()
        assert not (output1_before == 0).all()

        with tempfile.TemporaryDirectory() as tmp_dirname:
            # save the adapter checkpoints
            model.save_lora_adapter(os.path.join(tmp_dirname, "0"), safe_serialization=True, adapter_name="adapter0")
            model.save_lora_adapter(os.path.join(tmp_dirname, "1"), safe_serialization=True, adapter_name="adapter1")
            del model

            # load the first adapter
            torch.manual_seed(0)
            init_dict, _ = self.prepare_init_args_and_inputs_for_common()
            model = self.model_class(**init_dict).to(torch_device)

            if do_compile or (rank0 != rank1):
                # preparation is only needed when the model will be compiled or when the ranks differ
                model.enable_lora_hotswap(target_rank=max_rank)

            file_name0 = os.path.join(os.path.join(tmp_dirname, "0"), "pytorch_lora_weights.safetensors")
            file_name1 = os.path.join(os.path.join(tmp_dirname, "1"), "pytorch_lora_weights.safetensors")
            model.load_lora_adapter(file_name0, safe_serialization=True, adapter_name="adapter0", prefix=None)

            if do_compile:
                model = torch.compile(model, mode="reduce-overhead", dynamic=different_shapes is not None)

            with torch.inference_mode():
                # additionally check if dynamic compilation works.
                if different_shapes is not None:
                    for height, width in different_shapes:
                        new_inputs_dict = self.prepare_dummy_input(height=height, width=width)
                        _ = model(**new_inputs_dict)
                else:
                    output0_after = model(**inputs_dict)["sample"]
                    assert torch.allclose(output0_before, output0_after, atol=tol, rtol=tol)

            # hotswap the 2nd adapter
            model.load_lora_adapter(file_name1, adapter_name="adapter0", hotswap=True, prefix=None)

            # we need to call forward to potentially trigger recompilation
            with torch.inference_mode():
                if different_shapes is not None:
                    for height, width in different_shapes:
                        new_inputs_dict = self.prepare_dummy_input(height=height, width=width)
                        _ = model(**new_inputs_dict)
                else:
                    output1_after = model(**inputs_dict)["sample"]
                    assert torch.allclose(output1_before, output1_after, atol=tol, rtol=tol)

            # check error when not passing valid adapter name
            name = "does-not-exist"
            msg = f"Trying to hotswap LoRA adapter '{name}' but there is no existing adapter by that name"
            with self.assertRaisesRegex(ValueError, msg):
                model.load_lora_adapter(file_name1, adapter_name=name, hotswap=True, prefix=None)

    @parameterized.expand([(11, 11), (7, 13), (13, 7)])  # important to test small to large and vice versa
    def test_hotswapping_model(self, rank0, rank1):
        self.check_model_hotswap(
            do_compile=False, rank0=rank0, rank1=rank1, target_modules0=["to_q", "to_k", "to_v", "to_out.0"]
        )

    @parameterized.expand([(11, 11), (7, 13), (13, 7)])  # important to test small to large and vice versa
    def test_hotswapping_compiled_model_linear(self, rank0, rank1):
        # It's important to add this context to raise an error on recompilation
        target_modules = ["to_q", "to_k", "to_v", "to_out.0"]
        with torch._dynamo.config.patch(error_on_recompile=True), torch._inductor.utils.fresh_inductor_cache():
            self.check_model_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules0=target_modules)

    @parameterized.expand([(11, 11), (7, 13), (13, 7)])  # important to test small to large and vice versa
    def test_hotswapping_compiled_model_conv2d(self, rank0, rank1):
        if "unet" not in self.model_class.__name__.lower():
            pytest.skip("Test only applies to UNet.")

        # It's important to add this context to raise an error on recompilation
        target_modules = ["conv", "conv1", "conv2"]
        with torch._dynamo.config.patch(error_on_recompile=True), torch._inductor.utils.fresh_inductor_cache():
            self.check_model_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules0=target_modules)

    @parameterized.expand([(11, 11), (7, 13), (13, 7)])  # important to test small to large and vice versa
    def test_hotswapping_compiled_model_both_linear_and_conv2d(self, rank0, rank1):
        if "unet" not in self.model_class.__name__.lower():
            pytest.skip("Test only applies to UNet.")

        # It's important to add this context to raise an error on recompilation
        target_modules = ["to_q", "conv"]
        with torch._dynamo.config.patch(error_on_recompile=True), torch._inductor.utils.fresh_inductor_cache():
            self.check_model_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules0=target_modules)

    @parameterized.expand([(11, 11), (7, 13), (13, 7)])  # important to test small to large and vice versa
    def test_hotswapping_compiled_model_both_linear_and_other(self, rank0, rank1):
        # In `test_hotswapping_compiled_model_both_linear_and_conv2d()`, we check if we can do hotswapping
        # with `torch.compile()` for models that have both linear and conv layers. In this test, we check
        # if we can target a linear layer from the transformer blocks and another linear layer from non-attention
        # block.
        target_modules = ["to_q"]
        init_dict, _ = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)

        target_modules.append(self.get_linear_module_name_other_than_attn(model))
        del model

        # It's important to add this context to raise an error on recompilation
        with torch._dynamo.config.patch(error_on_recompile=True):
            self.check_model_hotswap(do_compile=True, rank0=rank0, rank1=rank1, target_modules0=target_modules)

    def test_enable_lora_hotswap_called_after_adapter_added_raises(self):
        # ensure that enable_lora_hotswap is called before loading the first adapter
        lora_config = self.get_lora_config(8, 8, target_modules=["to_q"])
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)
        model.add_adapter(lora_config)

        msg = re.escape("Call `enable_lora_hotswap` before loading the first adapter.")
        with self.assertRaisesRegex(RuntimeError, msg):
            model.enable_lora_hotswap(target_rank=32)

    def test_enable_lora_hotswap_called_after_adapter_added_warning(self):
        # ensure that enable_lora_hotswap is called before loading the first adapter
        from diffusers.loaders.peft import logger

        lora_config = self.get_lora_config(8, 8, target_modules=["to_q"])
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)
        model.add_adapter(lora_config)
        msg = (
            "It is recommended to call `enable_lora_hotswap` before loading the first adapter to avoid recompilation."
        )
        with self.assertLogs(logger=logger, level="WARNING") as cm:
            model.enable_lora_hotswap(target_rank=32, check_compiled="warn")
            assert any(msg in log for log in cm.output)

    def test_enable_lora_hotswap_called_after_adapter_added_ignore(self):
        # check that the error/warning can be ignored
        from diffusers.loaders.peft import logger

        lora_config = self.get_lora_config(8, 8, target_modules=["to_q"])
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)
        model.add_adapter(lora_config)
        # note: assertNoLogs requires Python 3.10+
        with self.assertNoLogs(logger, level="WARNING"):
            model.enable_lora_hotswap(target_rank=32, check_compiled="ignore")

    def test_enable_lora_hotswap_wrong_check_compiled_argument_raises(self):
        # check that wrong argument value raises an error
        lora_config = self.get_lora_config(8, 8, target_modules=["to_q"])
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict).to(torch_device)
        model.add_adapter(lora_config)
        msg = re.escape("check_compiles should be one of 'error', 'warn', or 'ignore', got 'wrong-argument' instead.")
        with self.assertRaisesRegex(ValueError, msg):
            model.enable_lora_hotswap(target_rank=32, check_compiled="wrong-argument")

    def test_hotswap_second_adapter_targets_more_layers_raises(self):
        # check the error and log
        from diffusers.loaders.peft import logger

        # at the moment, PEFT requires the 2nd adapter to target the same or a subset of layers
        target_modules0 = ["to_q"]
        target_modules1 = ["to_q", "to_k"]
        with self.assertRaises(RuntimeError):  # peft raises RuntimeError
            with self.assertLogs(logger=logger, level="ERROR") as cm:
                self.check_model_hotswap(
                    do_compile=True, rank0=8, rank1=8, target_modules0=target_modules0, target_modules1=target_modules1
                )
                assert any("Hotswapping adapter0 was unsuccessful" in log for log in cm.output)

    @parameterized.expand([(11, 11), (7, 13), (13, 7)])
    @require_torch_version_greater("2.7.1")
    def test_hotswapping_compile_on_different_shapes(self, rank0, rank1):
        different_shapes_for_compilation = self.different_shapes_for_compilation
        if different_shapes_for_compilation is None:
            pytest.skip(f"Skipping as `different_shapes_for_compilation` is not set for {self.__class__.__name__}.")
        # Setting `use_duck_shape=False` instructs the compiler not to reuse a single symbolic
        # variable for distinct input sizes that happen to be equal, so height and width can vary
        # independently without triggering recompilation. For more details,
        # check out this [comment](https://github.com/huggingface/diffusers/pull/11327#discussion_r2047659790).
        torch.fx.experimental._config.use_duck_shape = False

        target_modules = ["to_q", "to_k", "to_v", "to_out.0"]
        with torch._dynamo.config.patch(error_on_recompile=True):
            self.check_model_hotswap(
                do_compile=True,
                rank0=rank0,
                rank1=rank1,
                target_modules0=target_modules,
            )