# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import gc
import os
import tempfile
import unittest
from collections import OrderedDict

import torch
from parameterized import parameterized
from pytest import mark

from diffusers import UNet2DConditionModel
from diffusers.models.attention_processor import (
    CustomDiffusionAttnProcessor,
    IPAdapterAttnProcessor,
    IPAdapterAttnProcessor2_0,
)
from diffusers.models.embeddings import ImageProjection, IPAdapterFaceIDImageProjection, IPAdapterPlusImageProjection
from diffusers.utils import logging
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
    backend_empty_cache,
    enable_full_determinism,
    floats_tensor,
    load_hf_numpy,
    require_torch_accelerator,
    require_torch_accelerator_with_fp16,
    require_torch_accelerator_with_training,
    require_torch_gpu,
    skip_mps,
    slow,
    torch_all_close,
    torch_device,
)

from ..test_modeling_common import ModelTesterMixin, UNetTesterMixin


logger = logging.get_logger(__name__)

enable_full_determinism()


def create_ip_adapter_state_dict(model):
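    # Builds a small, randomly initialized IP-Adapter checkpoint in the two-entry layout that
    # `_load_ip_adapter_weights` consumes in the tests below: "image_proj" (projection weights)
    # and "ip_adapter" (per-attention-layer to_k_ip / to_v_ip weights keyed by a running index).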
    # "ip_adapter" (cross-attention weights)
    ip_cross_attn_state_dict = {}
    key_id = 1

    for name in model.attn_processors.keys():
        cross_attention_dim = (
            None if name.endswith("attn1.processor") or "motion_module" in name else model.config.cross_attention_dim
        )

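        # derive the attention layer's hidden size from its position in the UNet: the mid block
        # uses the last entry of block_out_channels, up blocks index it in reverse, and down
        # blocks index it directly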
        if name.startswith("mid_block"):
            hidden_size = model.config.block_out_channels[-1]
        elif name.startswith("up_blocks"):
            block_id = int(name[len("up_blocks.")])
            hidden_size = list(reversed(model.config.block_out_channels))[block_id]
        elif name.startswith("down_blocks"):
            block_id = int(name[len("down_blocks.")])
            hidden_size = model.config.block_out_channels[block_id]

        if cross_attention_dim is not None:
            sd = IPAdapterAttnProcessor(
                hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0
            ).state_dict()
            ip_cross_attn_state_dict.update(
                {
                    f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"],
                    f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"],
                }
            )

            key_id += 2

    # "image_proj" (ImageProjection layer weights)
    cross_attention_dim = model.config["cross_attention_dim"]
    image_projection = ImageProjection(
        cross_attention_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, num_image_text_embeds=4
    )

    ip_image_projection_state_dict = {}
    sd = image_projection.state_dict()
    ip_image_projection_state_dict.update(
        {
            "proj.weight": sd["image_embeds.weight"],
            "proj.bias": sd["image_embeds.bias"],
            "norm.weight": sd["norm.weight"],
            "norm.bias": sd["norm.bias"],
        }
    )

    del sd
    ip_state_dict = {}
    ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict})
    return ip_state_dict


def create_ip_adapter_plus_state_dict(model):
    # "ip_adapter" (cross-attention weights)
    ip_cross_attn_state_dict = {}
    key_id = 1

    for name in model.attn_processors.keys():
        cross_attention_dim = None if name.endswith("attn1.processor") else model.config.cross_attention_dim
        if name.startswith("mid_block"):
            hidden_size = model.config.block_out_channels[-1]
        elif name.startswith("up_blocks"):
            block_id = int(name[len("up_blocks.")])
            hidden_size = list(reversed(model.config.block_out_channels))[block_id]
        elif name.startswith("down_blocks"):
            block_id = int(name[len("down_blocks.")])
            hidden_size = model.config.block_out_channels[block_id]
        if cross_attention_dim is not None:
            sd = IPAdapterAttnProcessor(
                hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0
            ).state_dict()
            ip_cross_attn_state_dict.update(
                {
                    f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"],
                    f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"],
                }
            )

            key_id += 2

    # "image_proj" (ImageProjection layer weights)
    cross_attention_dim = model.config["cross_attention_dim"]
    image_projection = IPAdapterPlusImageProjection(
        embed_dims=cross_attention_dim, output_dims=cross_attention_dim, dim_head=32, heads=2, num_queries=4
    )

    ip_image_projection_state_dict = OrderedDict()
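    # Rename the diffusers `IPAdapterPlusImageProjection` keys into the layout of an original
    # IP-Adapter Plus (Resampler-style) checkpoint, presumably so the key-conversion path in the
    # loader gets exercised: ln0/ln1 become per-block norm1/norm2, attention projections move
    # under `attn` (with to_k/to_v fused into `to_kv`), and the feed-forward layers are renumbered.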
    for k, v in image_projection.state_dict().items():
        if "2.to" in k:
            k = k.replace("2.to", "0.to")
        elif "layers.0.ln0" in k:
            k = k.replace("layers.0.ln0", "layers.0.0.norm1")
        elif "layers.0.ln1" in k:
            k = k.replace("layers.0.ln1", "layers.0.0.norm2")
        elif "layers.1.ln0" in k:
            k = k.replace("layers.1.ln0", "layers.1.0.norm1")
        elif "layers.1.ln1" in k:
            k = k.replace("layers.1.ln1", "layers.1.0.norm2")
        elif "layers.2.ln0" in k:
            k = k.replace("layers.2.ln0", "layers.2.0.norm1")
        elif "layers.2.ln1" in k:
            k = k.replace("layers.2.ln1", "layers.2.0.norm2")
        elif "layers.3.ln0" in k:
            k = k.replace("layers.3.ln0", "layers.3.0.norm1")
        elif "layers.3.ln1" in k:
            k = k.replace("layers.3.ln1", "layers.3.0.norm2")
        elif "to_q" in k:
            parts = k.split(".")
            parts[2] = "attn"
            k = ".".join(parts)
        elif "to_out.0" in k:
            parts = k.split(".")
            parts[2] = "attn"
            k = ".".join(parts)
            k = k.replace("to_out.0", "to_out")
        else:
            k = k.replace("0.ff.0", "0.1.0")
            k = k.replace("0.ff.1.net.0.proj", "0.1.1")
            k = k.replace("0.ff.1.net.2", "0.1.3")

            k = k.replace("1.ff.0", "1.1.0")
            k = k.replace("1.ff.1.net.0.proj", "1.1.1")
            k = k.replace("1.ff.1.net.2", "1.1.3")

            k = k.replace("2.ff.0", "2.1.0")
            k = k.replace("2.ff.1.net.0.proj", "2.1.1")
            k = k.replace("2.ff.1.net.2", "2.1.3")

            k = k.replace("3.ff.0", "3.1.0")
            k = k.replace("3.ff.1.net.0.proj", "3.1.1")
            k = k.replace("3.ff.1.net.2", "3.1.3")

        # if "norm_cross" in k:
        #     ip_image_projection_state_dict[k.replace("norm_cross", "norm1")] = v
        # elif "layer_norm" in k:
        #     ip_image_projection_state_dict[k.replace("layer_norm", "norm2")] = v
        if "to_k" in k:
            parts = k.split(".")
            parts[2] = "attn"
            k = ".".join(parts)
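            # the Resampler-style checkpoint stores k and v fused in a single `to_kv` projection,
            # so the k weight is stacked twice to fill both halves (exact values don't matter here)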
            ip_image_projection_state_dict[k.replace("to_k", "to_kv")] = torch.cat([v, v], dim=0)
        elif "to_v" in k:
            continue
        else:
            ip_image_projection_state_dict[k] = v

    ip_state_dict = {}
    ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict})
    return ip_state_dict


def create_ip_adapter_faceid_state_dict(model):
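    # Same "image_proj" / "ip_adapter" layout as `create_ip_adapter_state_dict`, but the image
    # projection uses the FaceID feed-forward variant (`IPAdapterFaceIDImageProjection`).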
    # "ip_adapter" (cross-attention weights)
    # no LoRA weights
    ip_cross_attn_state_dict = {}
    key_id = 1

    for name in model.attn_processors.keys():
        cross_attention_dim = (
            None if name.endswith("attn1.processor") or "motion_module" in name else model.config.cross_attention_dim
        )

        if name.startswith("mid_block"):
            hidden_size = model.config.block_out_channels[-1]
        elif name.startswith("up_blocks"):
            block_id = int(name[len("up_blocks.")])
            hidden_size = list(reversed(model.config.block_out_channels))[block_id]
        elif name.startswith("down_blocks"):
            block_id = int(name[len("down_blocks.")])
            hidden_size = model.config.block_out_channels[block_id]

        if cross_attention_dim is not None:
            sd = IPAdapterAttnProcessor(
                hidden_size=hidden_size, cross_attention_dim=cross_attention_dim, scale=1.0
            ).state_dict()
            ip_cross_attn_state_dict.update(
                {
                    f"{key_id}.to_k_ip.weight": sd["to_k_ip.0.weight"],
                    f"{key_id}.to_v_ip.weight": sd["to_v_ip.0.weight"],
                }
            )

            key_id += 2

    # "image_proj" (ImageProjection layer weights)
    cross_attention_dim = model.config["cross_attention_dim"]
    image_projection = IPAdapterFaceIDImageProjection(
        cross_attention_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, mult=2, num_tokens=4
    )

    ip_image_projection_state_dict = {}
    sd = image_projection.state_dict()
    ip_image_projection_state_dict.update(
        {
            "proj.0.weight": sd["ff.net.0.proj.weight"],
            "proj.0.bias": sd["ff.net.0.proj.bias"],
            "proj.2.weight": sd["ff.net.2.weight"],
            "proj.2.bias": sd["ff.net.2.bias"],
            "norm.weight": sd["norm.weight"],
            "norm.bias": sd["norm.bias"],
        }
    )

    del sd
    ip_state_dict = {}
    ip_state_dict.update({"image_proj": ip_image_projection_state_dict, "ip_adapter": ip_cross_attn_state_dict})
    return ip_state_dict


def create_custom_diffusion_layers(model, mock_weights: bool = True):
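    # Builds a CustomDiffusionAttnProcessor for every attention layer, initializing its key/value
    # (and, since train_q_out is True, query/output) projections from the UNet's own weights;
    # with mock_weights=True the copies are shifted by +1 so they differ from the originals.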
    train_kv = True
    train_q_out = True
    custom_diffusion_attn_procs = {}

    st = model.state_dict()
    for name, _ in model.attn_processors.items():
        cross_attention_dim = None if name.endswith("attn1.processor") else model.config.cross_attention_dim
        if name.startswith("mid_block"):
            hidden_size = model.config.block_out_channels[-1]
        elif name.startswith("up_blocks"):
            block_id = int(name[len("up_blocks.")])
            hidden_size = list(reversed(model.config.block_out_channels))[block_id]
        elif name.startswith("down_blocks"):
            block_id = int(name[len("down_blocks.")])
            hidden_size = model.config.block_out_channels[block_id]
        layer_name = name.split(".processor")[0]
        weights = {
            "to_k_custom_diffusion.weight": st[layer_name + ".to_k.weight"],
            "to_v_custom_diffusion.weight": st[layer_name + ".to_v.weight"],
        }
        if train_q_out:
            weights["to_q_custom_diffusion.weight"] = st[layer_name + ".to_q.weight"]
            weights["to_out_custom_diffusion.0.weight"] = st[layer_name + ".to_out.0.weight"]
            weights["to_out_custom_diffusion.0.bias"] = st[layer_name + ".to_out.0.bias"]
        if cross_attention_dim is not None:
            custom_diffusion_attn_procs[name] = CustomDiffusionAttnProcessor(
                train_kv=train_kv,
                train_q_out=train_q_out,
                hidden_size=hidden_size,
                cross_attention_dim=cross_attention_dim,
            ).to(model.device)
            custom_diffusion_attn_procs[name].load_state_dict(weights)
            if mock_weights:
                # add 1 to weights to mock trained weights
                with torch.no_grad():
                    custom_diffusion_attn_procs[name].to_k_custom_diffusion.weight += 1
                    custom_diffusion_attn_procs[name].to_v_custom_diffusion.weight += 1
        else:
            custom_diffusion_attn_procs[name] = CustomDiffusionAttnProcessor(
                train_kv=False,
                train_q_out=False,
                hidden_size=hidden_size,
                cross_attention_dim=cross_attention_dim,
            )
    del st
    return custom_diffusion_attn_procs


class UNet2DConditionModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNet2DConditionModel
    main_input_name = "sample"
    # We override the items here because the unet under consideration is small.
    model_split_percents = [0.5, 0.3, 0.4]

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (16, 16)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)
        encoder_hidden_states = floats_tensor((batch_size, 4, 8)).to(torch_device)

        return {"sample": noise, "timestep": time_step, "encoder_hidden_states": encoder_hidden_states}

    @property
    def input_shape(self):
        return (4, 16, 16)

    @property
    def output_shape(self):
        return (4, 16, 16)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (4, 8),
            "norm_num_groups": 4,
            "down_block_types": ("CrossAttnDownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "CrossAttnUpBlock2D"),
            "cross_attention_dim": 8,
            "attention_head_dim": 2,
            "out_channels": 4,
            "in_channels": 4,
            "layers_per_block": 1,
            "sample_size": 16,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_enable_works(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)

        model.enable_xformers_memory_efficient_attention()

        assert (
            model.mid_block.attentions[0].transformer_blocks[0].attn1.processor.__class__.__name__
            == "XFormersAttnProcessor"
        ), "xformers is not enabled"

    @require_torch_accelerator_with_training
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backward pass; for simplicity we backprop on the mean difference to a random
        # target instead of computing a proper loss
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backward pass again, backpropagating on the mean difference to the same random
        # target instead of computing a proper loss
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))

    def test_model_with_attention_head_dim_tuple(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["block_out_channels"] = (16, 32)
        init_dict["attention_head_dim"] = (8, 16)

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            output = model(**inputs_dict)

            if isinstance(output, dict):
                output = output.sample

        self.assertIsNotNone(output)
        expected_shape = inputs_dict["sample"].shape
        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")

    def test_model_with_use_linear_projection(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["use_linear_projection"] = True

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            output = model(**inputs_dict)

            if isinstance(output, dict):
                output = output.sample

        self.assertIsNotNone(output)
        expected_shape = inputs_dict["sample"].shape
        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")

    def test_model_with_cross_attention_dim_tuple(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["cross_attention_dim"] = (8, 8)

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            output = model(**inputs_dict)

            if isinstance(output, dict):
                output = output.sample

        self.assertIsNotNone(output)
        expected_shape = inputs_dict["sample"].shape
        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")

    def test_model_with_simple_projection(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        batch_size, _, _, sample_size = inputs_dict["sample"].shape

        init_dict["class_embed_type"] = "simple_projection"
        init_dict["projection_class_embeddings_input_dim"] = sample_size

        inputs_dict["class_labels"] = floats_tensor((batch_size, sample_size)).to(torch_device)

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            output = model(**inputs_dict)

            if isinstance(output, dict):
                output = output.sample

        self.assertIsNotNone(output)
        expected_shape = inputs_dict["sample"].shape
        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")

    def test_model_with_class_embeddings_concat(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        batch_size, _, _, sample_size = inputs_dict["sample"].shape

        init_dict["class_embed_type"] = "simple_projection"
        init_dict["projection_class_embeddings_input_dim"] = sample_size
        init_dict["class_embeddings_concat"] = True

        inputs_dict["class_labels"] = floats_tensor((batch_size, sample_size)).to(torch_device)

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            output = model(**inputs_dict)

            if isinstance(output, dict):
                output = output.sample

        self.assertIsNotNone(output)
        expected_shape = inputs_dict["sample"].shape
        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")

    def test_model_attention_slicing(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["block_out_channels"] = (16, 32)
        init_dict["attention_head_dim"] = (8, 16)

        model = self.model_class(**init_dict)
        model.to(torch_device)
        model.eval()

        model.set_attention_slice("auto")
        with torch.no_grad():
            output = model(**inputs_dict)
        assert output is not None

        model.set_attention_slice("max")
        with torch.no_grad():
            output = model(**inputs_dict)
        assert output is not None

        model.set_attention_slice(2)
        with torch.no_grad():
            output = model(**inputs_dict)
        assert output is not None

    def test_model_sliceable_head_dim(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["block_out_channels"] = (16, 32)
        init_dict["attention_head_dim"] = (8, 16)

        model = self.model_class(**init_dict)

        def check_sliceable_dim_attr(module: torch.nn.Module):
            if hasattr(module, "set_attention_slice"):
                assert isinstance(module.sliceable_head_dim, int)

            for child in module.children():
                check_sliceable_dim_attr(child)

        # retrieve number of attention layers
        for module in model.children():
            check_sliceable_dim_attr(module)

    def test_gradient_checkpointing_is_applied(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["block_out_channels"] = (16, 32)
        init_dict["attention_head_dim"] = (8, 16)

        model_class_copy = copy.copy(self.model_class)

        modules_with_gc_enabled = {}

        # now monkey patch the following function:
        #     def _set_gradient_checkpointing(self, module, value=False):
        #         if hasattr(module, "gradient_checkpointing"):
        #             module.gradient_checkpointing = value

        def _set_gradient_checkpointing_new(self, module, value=False):
            if hasattr(module, "gradient_checkpointing"):
                module.gradient_checkpointing = value
                modules_with_gc_enabled[module.__class__.__name__] = True

        model_class_copy._set_gradient_checkpointing = _set_gradient_checkpointing_new

        model = model_class_copy(**init_dict)
        model.enable_gradient_checkpointing()

        EXPECTED_SET = {
            "CrossAttnUpBlock2D",
            "CrossAttnDownBlock2D",
            "UNetMidBlock2DCrossAttn",
            "UpBlock2D",
            "Transformer2DModel",
            "DownBlock2D",
        }

        assert set(modules_with_gc_enabled.keys()) == EXPECTED_SET
        assert all(modules_with_gc_enabled.values()), "All modules should be enabled"

    def test_special_attn_proc(self):
        class AttnEasyProc(torch.nn.Module):
            def __init__(self, num):
                super().__init__()
                self.weight = torch.nn.Parameter(torch.tensor(num))
                self.is_run = False
                self.number = 0
                self.counter = 0

            def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None, number=None):
                batch_size, sequence_length, _ = hidden_states.shape
                attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

                query = attn.to_q(hidden_states)

                encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
                key = attn.to_k(encoder_hidden_states)
                value = attn.to_v(encoder_hidden_states)

                query = attn.head_to_batch_dim(query)
                key = attn.head_to_batch_dim(key)
                value = attn.head_to_batch_dim(value)

                attention_probs = attn.get_attention_scores(query, key, attention_mask)
                hidden_states = torch.bmm(attention_probs, value)
                hidden_states = attn.batch_to_head_dim(hidden_states)

                # linear proj
                hidden_states = attn.to_out[0](hidden_states)
                # dropout
                hidden_states = attn.to_out[1](hidden_states)

                hidden_states += self.weight

                self.is_run = True
                self.counter += 1
                self.number = number

                return hidden_states

        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["block_out_channels"] = (16, 32)
        init_dict["attention_head_dim"] = (8, 16)

        model = self.model_class(**init_dict)
        model.to(torch_device)

        processor = AttnEasyProc(5.0)

        model.set_attn_processor(processor)
        model(**inputs_dict, cross_attention_kwargs={"number": 123}).sample

        assert processor.counter == 8
        assert processor.is_run
        assert processor.number == 123

    @parameterized.expand(
        [
            # fmt: off
            [torch.bool],
            [torch.long],
            [torch.float],
            # fmt: on
        ]
    )
    def test_model_xattn_mask(self, mask_dtype):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**{**init_dict, "attention_head_dim": (8, 16), "block_out_channels": (16, 32)})
        model.to(torch_device)
        model.eval()

        cond = inputs_dict["encoder_hidden_states"]
        with torch.no_grad():
            full_cond_out = model(**inputs_dict).sample
            assert full_cond_out is not None

            keepall_mask = torch.ones(*cond.shape[:-1], device=cond.device, dtype=mask_dtype)
            full_cond_keepallmask_out = model(**{**inputs_dict, "encoder_attention_mask": keepall_mask}).sample
            assert full_cond_keepallmask_out.allclose(
                full_cond_out, rtol=1e-05, atol=1e-05
            ), "a 'keep all' mask should give the same result as no mask"

            trunc_cond = cond[:, :-1, :]
            trunc_cond_out = model(**{**inputs_dict, "encoder_hidden_states": trunc_cond}).sample
            assert not trunc_cond_out.allclose(
                full_cond_out, rtol=1e-05, atol=1e-05
            ), "discarding the last token from our cond should change the result"

            batch, tokens, _ = cond.shape
            mask_last = (torch.arange(tokens) < tokens - 1).expand(batch, -1).to(cond.device, mask_dtype)
            masked_cond_out = model(**{**inputs_dict, "encoder_attention_mask": mask_last}).sample
            assert masked_cond_out.allclose(
                trunc_cond_out, rtol=1e-05, atol=1e-05
            ), "masking the last token from our cond should be equivalent to truncating that token out of the condition"

    # see diffusers.models.attention_processor::Attention#prepare_attention_mask
    # note: we may not need to fix mask padding to work for stable-diffusion cross-attn masks.
    # since the use-case (somebody passes in a too-short cross-attn mask) is pretty esoteric.
    # maybe it's fine that this only works for the unclip use-case.
    @mark.skip(
        reason="we currently pad mask by target_length tokens (what unclip needs), whereas stable-diffusion's cross-attn needs to instead pad by remaining_length."
    )
    def test_model_xattn_padding(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        model = self.model_class(**{**init_dict, "attention_head_dim": (8, 16)})
        model.to(torch_device)
        model.eval()

        cond = inputs_dict["encoder_hidden_states"]
        with torch.no_grad():
            full_cond_out = model(**inputs_dict).sample
            assert full_cond_out is not None

            batch, tokens, _ = cond.shape
            keeplast_mask = (torch.arange(tokens) == tokens - 1).expand(batch, -1).to(cond.device, torch.bool)
            keeplast_out = model(**{**inputs_dict, "encoder_attention_mask": keeplast_mask}).sample
            assert not keeplast_out.allclose(full_cond_out), "a 'keep last token' mask should change the result"

            trunc_mask = torch.zeros(batch, tokens - 1, device=cond.device, dtype=torch.bool)
            trunc_mask_out = model(**{**inputs_dict, "encoder_attention_mask": trunc_mask}).sample
            assert trunc_mask_out.allclose(
                keeplast_out
            ), "a mask with fewer tokens than condition, will be padded with 'keep' tokens. a 'discard-all' mask missing the final token is thus equivalent to a 'keep last' mask."

    def test_custom_diffusion_processors(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["block_out_channels"] = (16, 32)
        init_dict["attention_head_dim"] = (8, 16)

        model = self.model_class(**init_dict)
        model.to(torch_device)

        with torch.no_grad():
            sample1 = model(**inputs_dict).sample

        custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False)

        # make sure we can set a list of attention processors
        model.set_attn_processor(custom_diffusion_attn_procs)
        model.to(torch_device)

        # test that attn processors can be set to itself
        model.set_attn_processor(model.attn_processors)

        with torch.no_grad():
            sample2 = model(**inputs_dict).sample

        assert (sample1 - sample2).abs().max() < 3e-3

    def test_custom_diffusion_save_load(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["block_out_channels"] = (16, 32)
        init_dict["attention_head_dim"] = (8, 16)

        torch.manual_seed(0)
        model = self.model_class(**init_dict)
        model.to(torch_device)

        with torch.no_grad():
            old_sample = model(**inputs_dict).sample

        custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False)
        model.set_attn_processor(custom_diffusion_attn_procs)

        with torch.no_grad():
            sample = model(**inputs_dict).sample

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_attn_procs(tmpdirname, safe_serialization=False)
            self.assertTrue(os.path.isfile(os.path.join(tmpdirname, "pytorch_custom_diffusion_weights.bin")))
            torch.manual_seed(0)
            new_model = self.model_class(**init_dict)
            new_model.load_attn_procs(tmpdirname, weight_name="pytorch_custom_diffusion_weights.bin")
            new_model.to(torch_device)

        with torch.no_grad():
            new_sample = new_model(**inputs_dict).sample

        assert (sample - new_sample).abs().max() < 1e-4

        # custom diffusion and no custom diffusion should be the same
        assert (sample - old_sample).abs().max() < 3e-3

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_custom_diffusion_xformers_on_off(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["block_out_channels"] = (16, 32)
        init_dict["attention_head_dim"] = (8, 16)

        torch.manual_seed(0)
        model = self.model_class(**init_dict)
        model.to(torch_device)
        custom_diffusion_attn_procs = create_custom_diffusion_layers(model, mock_weights=False)
        model.set_attn_processor(custom_diffusion_attn_procs)

        # default
        with torch.no_grad():
            sample = model(**inputs_dict).sample

            model.enable_xformers_memory_efficient_attention()
            on_sample = model(**inputs_dict).sample

            model.disable_xformers_memory_efficient_attention()
            off_sample = model(**inputs_dict).sample

        assert (sample - on_sample).abs().max() < 1e-4
        assert (sample - off_sample).abs().max() < 1e-4

    def test_pickle(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["block_out_channels"] = (16, 32)
        init_dict["attention_head_dim"] = (8, 16)

        model = self.model_class(**init_dict)
        model.to(torch_device)

        with torch.no_grad():
            sample = model(**inputs_dict).sample

        sample_copy = copy.copy(sample)

        assert (sample - sample_copy).abs().max() < 1e-4

    def test_asymmetrical_unet(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        # Add asymmetry to configs
        init_dict["transformer_layers_per_block"] = [[3, 2], 1]
        init_dict["reverse_transformer_layers_per_block"] = [[3, 4], 1]

        torch.manual_seed(0)
        model = self.model_class(**init_dict)
        model.to(torch_device)

        output = model(**inputs_dict).sample
        expected_shape = inputs_dict["sample"].shape

        # Check if input and output shapes are the same
        self.assertEqual(output.shape, expected_shape, "Input and output shapes do not match")

    def test_ip_adapter(self):
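        # Load two randomly initialized IP-Adapter checkpoints and check that swapping them in
        # and out changes, then restores, the UNet output, covering the multi-adapter and
        # plain-tensor image_embeds code paths as well.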
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["block_out_channels"] = (16, 32)
        init_dict["attention_head_dim"] = (8, 16)

        model = self.model_class(**init_dict)
        model.to(torch_device)

        # forward pass without ip-adapter
        with torch.no_grad():
            sample1 = model(**inputs_dict).sample

        # update inputs_dict for ip-adapter
        batch_size = inputs_dict["encoder_hidden_states"].shape[0]
        # for ip-adapter image_embeds has shape [batch_size, num_image, embed_dim]
        image_embeds = floats_tensor((batch_size, 1, model.config.cross_attention_dim)).to(torch_device)
        inputs_dict["added_cond_kwargs"] = {"image_embeds": [image_embeds]}

        # make ip_adapter_1 and ip_adapter_2
        ip_adapter_1 = create_ip_adapter_state_dict(model)

        image_proj_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["image_proj"].items()}
        cross_attn_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["ip_adapter"].items()}
        ip_adapter_2 = {}
        ip_adapter_2.update({"image_proj": image_proj_state_dict_2, "ip_adapter": cross_attn_state_dict_2})

        # forward pass ip_adapter_1
        model._load_ip_adapter_weights([ip_adapter_1])
        assert model.config.encoder_hid_dim_type == "ip_image_proj"
        assert model.encoder_hid_proj is not None
        assert model.down_blocks[0].attentions[0].transformer_blocks[0].attn2.processor.__class__.__name__ in (
            "IPAdapterAttnProcessor",
            "IPAdapterAttnProcessor2_0",
        )
        with torch.no_grad():
            sample2 = model(**inputs_dict).sample

        # forward pass with ip_adapter_2
        model._load_ip_adapter_weights([ip_adapter_2])
        with torch.no_grad():
            sample3 = model(**inputs_dict).sample

        # forward pass with ip_adapter_1 again
        model._load_ip_adapter_weights([ip_adapter_1])
        with torch.no_grad():
            sample4 = model(**inputs_dict).sample

        # forward pass with multiple ip-adapters and multiple images
        model._load_ip_adapter_weights([ip_adapter_1, ip_adapter_2])
        # set the scale for ip_adapter_2 to 0 so the result should match loading only ip_adapter_1
        for attn_processor in model.attn_processors.values():
            if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
                attn_processor.scale = [1, 0]
        image_embeds_multi = image_embeds.repeat(1, 2, 1)
        inputs_dict["added_cond_kwargs"] = {"image_embeds": [image_embeds_multi, image_embeds_multi]}
        with torch.no_grad():
            sample5 = model(**inputs_dict).sample

        # forward pass with a single ip-adapter & a single image, where image_embeds is a plain 2-d tensor rather than a list
        image_embeds = image_embeds.squeeze(1)
        inputs_dict["added_cond_kwargs"] = {"image_embeds": image_embeds}

        model._load_ip_adapter_weights(ip_adapter_1)
        with torch.no_grad():
            sample6 = model(**inputs_dict).sample

        assert not sample1.allclose(sample2, atol=1e-4, rtol=1e-4)
        assert not sample2.allclose(sample3, atol=1e-4, rtol=1e-4)
        assert sample2.allclose(sample4, atol=1e-4, rtol=1e-4)
        assert sample2.allclose(sample5, atol=1e-4, rtol=1e-4)
        assert sample2.allclose(sample6, atol=1e-4, rtol=1e-4)

    def test_ip_adapter_plus(self):
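        # same flow as `test_ip_adapter`, but with the Plus (Resampler-style) image projection and
        # 4-d image_embeds of shape [batch_size, num_image, sequence_length, embed_dim]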
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()

        init_dict["block_out_channels"] = (16, 32)
        init_dict["attention_head_dim"] = (8, 16)

        model = self.model_class(**init_dict)
        model.to(torch_device)

        # forward pass without ip-adapter
        with torch.no_grad():
            sample1 = model(**inputs_dict).sample

        # update inputs_dict for ip-adapter
        batch_size = inputs_dict["encoder_hidden_states"].shape[0]
        # for ip-adapter-plus image_embeds has shape [batch_size, num_image, sequence_length, embed_dim]
        image_embeds = floats_tensor((batch_size, 1, 1, model.config.cross_attention_dim)).to(torch_device)
        inputs_dict["added_cond_kwargs"] = {"image_embeds": [image_embeds]}

        # make ip_adapter_1 and ip_adapter_2
        ip_adapter_1 = create_ip_adapter_plus_state_dict(model)

        image_proj_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["image_proj"].items()}
        cross_attn_state_dict_2 = {k: w + 1.0 for k, w in ip_adapter_1["ip_adapter"].items()}
        ip_adapter_2 = {}
        ip_adapter_2.update({"image_proj": image_proj_state_dict_2, "ip_adapter": cross_attn_state_dict_2})

        # forward pass ip_adapter_1
        model._load_ip_adapter_weights([ip_adapter_1])
        assert model.config.encoder_hid_dim_type == "ip_image_proj"
        assert model.encoder_hid_proj is not None
        assert model.down_blocks[0].attentions[0].transformer_blocks[0].attn2.processor.__class__.__name__ in (
            "IPAdapterAttnProcessor",
            "IPAdapterAttnProcessor2_0",
        )
        with torch.no_grad():
            sample2 = model(**inputs_dict).sample

        # forward pass with ip_adapter_2
        model._load_ip_adapter_weights([ip_adapter_2])
        with torch.no_grad():
            sample3 = model(**inputs_dict).sample

        # forward pass with ip_adapter_1 again
        model._load_ip_adapter_weights([ip_adapter_1])
        with torch.no_grad():
            sample4 = model(**inputs_dict).sample

        # forward pass with multiple ip-adapters and multiple images
        model._load_ip_adapter_weights([ip_adapter_1, ip_adapter_2])
        # set the scale for ip_adapter_2 to 0 so the result should match loading only ip_adapter_1
        for attn_processor in model.attn_processors.values():
            if isinstance(attn_processor, (IPAdapterAttnProcessor, IPAdapterAttnProcessor2_0)):
                attn_processor.scale = [1, 0]
        image_embeds_multi = image_embeds.repeat(1, 2, 1, 1)
        inputs_dict["added_cond_kwargs"] = {"image_embeds": [image_embeds_multi, image_embeds_multi]}
        with torch.no_grad():
            sample5 = model(**inputs_dict).sample

        # forward pass with a single ip-adapter & a single image, where image_embeds is a 3-d tensor rather than a list
        image_embeds = image_embeds.squeeze(1)
        inputs_dict["added_cond_kwargs"] = {"image_embeds": image_embeds}

        model._load_ip_adapter_weights(ip_adapter_1)
        with torch.no_grad():
            sample6 = model(**inputs_dict).sample

        assert not sample1.allclose(sample2, atol=1e-4, rtol=1e-4)
        assert not sample2.allclose(sample3, atol=1e-4, rtol=1e-4)
        assert sample2.allclose(sample4, atol=1e-4, rtol=1e-4)
        assert sample2.allclose(sample5, atol=1e-4, rtol=1e-4)
        assert sample2.allclose(sample6, atol=1e-4, rtol=1e-4)


@slow
class UNet2DConditionModelIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f"gaussian_noise_s={seed}_shape={'_'.join([str(s) for s in shape])}.npy"

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

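    # Latents and encoder hidden states are pre-generated Gaussian-noise fixtures fetched from the
    # Hub via `load_hf_numpy` (see `get_file_format`), so the expected output slices are reproducible.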
    def get_latents(self, seed=0, shape=(4, 4, 64, 64), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_unet_model(self, fp16=False, model_id="CompVis/stable-diffusion-v1-4"):
        revision = "fp16" if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32

        model = UNet2DConditionModel.from_pretrained(
            model_id, subfolder="unet", torch_dtype=torch_dtype, revision=revision
        )
        model.to(torch_device).eval()

        return model

    @require_torch_gpu
    def test_set_attention_slice_auto(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        unet = self.get_unet_model()
        unet.set_attention_slice("auto")

        latents = self.get_latents(33)
        encoder_hidden_states = self.get_encoder_hidden_states(33)
        timestep = 1

        with torch.no_grad():
            _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample

        mem_bytes = torch.cuda.max_memory_allocated()

        assert mem_bytes < 5 * 10**9

    @require_torch_gpu
    def test_set_attention_slice_max(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        unet = self.get_unet_model()
        unet.set_attention_slice("max")

        latents = self.get_latents(33)
        encoder_hidden_states = self.get_encoder_hidden_states(33)
        timestep = 1

        with torch.no_grad():
            _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample

        mem_bytes = torch.cuda.max_memory_allocated()

        assert mem_bytes < 5 * 10**9

    @require_torch_gpu
    def test_set_attention_slice_int(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        unet = self.get_unet_model()
        unet.set_attention_slice(2)

        latents = self.get_latents(33)
        encoder_hidden_states = self.get_encoder_hidden_states(33)
        timestep = 1

        with torch.no_grad():
            _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample

        mem_bytes = torch.cuda.max_memory_allocated()

        assert mem_bytes < 5 * 10**9

    @require_torch_gpu
    def test_set_attention_slice_list(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        # there are 32 sliceable layers
        slice_list = 16 * [2, 3]
        unet = self.get_unet_model()
        unet.set_attention_slice(slice_list)

        latents = self.get_latents(33)
        encoder_hidden_states = self.get_encoder_hidden_states(33)
        timestep = 1

        with torch.no_grad():
            _ = unet(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample

        mem_bytes = torch.cuda.max_memory_allocated()

        assert mem_bytes < 5 * 10**9

    def get_encoder_hidden_states(self, seed=0, shape=(4, 77, 768), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        hidden_states = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return hidden_states

    @parameterized.expand(
        [
            # fmt: off
            [33, 4, [-0.4424, 0.1510, -0.1937, 0.2118, 0.3746, -0.3957, 0.0160, -0.0435]],
            [47, 0.55, [-0.1508, 0.0379, -0.3075, 0.2540, 0.3633, -0.0821, 0.1719, -0.0207]],
            [21, 0.89, [-0.6479, 0.6364, -0.3464, 0.8697, 0.4443, -0.6289, -0.0091, 0.1778]],
            [9, 1000, [0.8888, -0.5659, 0.5834, -0.7469, 1.1912, -0.3923, 1.1241, -0.4424]],
            # fmt: on
        ]
    )
    @require_torch_accelerator_with_fp16
    def test_compvis_sd_v1_4(self, seed, timestep, expected_slice):
        model = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4")
        latents = self.get_latents(seed)
        encoder_hidden_states = self.get_encoder_hidden_states(seed)

        timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)

        with torch.no_grad():
            sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample

        assert sample.shape == latents.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2323, -0.1304, 0.0813, -0.3093, -0.0919, -0.1571, -0.1125, -0.5806]],
            [17, 0.55, [-0.0831, -0.2443, 0.0901, -0.0919, 0.3396, 0.0103, -0.3743, 0.0701]],
            [8, 0.89, [-0.4863, 0.0859, 0.0875, -0.1658, 0.9199, -0.0114, 0.4839, 0.4639]],
            [3, 1000, [-0.5649, 0.2402, -0.5518, 0.1248, 1.1328, -0.2443, -0.0325, -1.0078]],
            # fmt: on
        ]
    )
    @require_torch_accelerator_with_fp16
    def test_compvis_sd_v1_4_fp16(self, seed, timestep, expected_slice):
        model = self.get_unet_model(model_id="CompVis/stable-diffusion-v1-4", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)

        with torch.no_grad():
            sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample

        assert sample.shape == latents.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, 4, [-0.4430, 0.1570, -0.1867, 0.2376, 0.3205, -0.3681, 0.0525, -0.0722]],
            [47, 0.55, [-0.1415, 0.0129, -0.3136, 0.2257, 0.3430, -0.0536, 0.2114, -0.0436]],
            [21, 0.89, [-0.7091, 0.6664, -0.3643, 0.9032, 0.4499, -0.6541, 0.0139, 0.1750]],
            [9, 1000, [0.8878, -0.5659, 0.5844, -0.7442, 1.1883, -0.3927, 1.1192, -0.4423]],
            # fmt: on
        ]
    )
    @require_torch_accelerator
    @skip_mps
    def test_compvis_sd_v1_5(self, seed, timestep, expected_slice):
        model = self.get_unet_model(model_id="runwayml/stable-diffusion-v1-5")
        latents = self.get_latents(seed)
        encoder_hidden_states = self.get_encoder_hidden_states(seed)

        timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)

        with torch.no_grad():
            sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample

        assert sample.shape == latents.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.2695, -0.1669, 0.0073, -0.3181, -0.1187, -0.1676, -0.1395, -0.5972]],
            [17, 0.55, [-0.1290, -0.2588, 0.0551, -0.0916, 0.3286, 0.0238, -0.3669, 0.0322]],
            [8, 0.89, [-0.5283, 0.1198, 0.0870, -0.1141, 0.9189, -0.0150, 0.5474, 0.4319]],
            [3, 1000, [-0.5601, 0.2411, -0.5435, 0.1268, 1.1338, -0.2427, -0.0280, -1.0020]],
            # fmt: on
        ]
    )
    @require_torch_accelerator_with_fp16
    def test_compvis_sd_v1_5_fp16(self, seed, timestep, expected_slice):
        model = self.get_unet_model(model_id="runwayml/stable-diffusion-v1-5", fp16=True)
        latents = self.get_latents(seed, fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)

        with torch.no_grad():
            sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample

        assert sample.shape == latents.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand(
        [
            # fmt: off
            [33, 4, [-0.7639, 0.0106, -0.1615, -0.3487, -0.0423, -0.7972, 0.0085, -0.4858]],
            [47, 0.55, [-0.6564, 0.0795, -1.9026, -0.6258, 1.8235, 1.2056, 1.2169, 0.9073]],
            [21, 0.89, [0.0327, 0.4399, -0.6358, 0.3417, 0.4120, -0.5621, -0.0397, -1.0430]],
            [9, 1000, [0.1600, 0.7303, -1.0556, -0.3515, -0.7440, -1.2037, -1.8149, -1.8931]],
            # fmt: on
        ]
    )
    @require_torch_accelerator
    @skip_mps
    def test_compvis_sd_inpaint(self, seed, timestep, expected_slice):
        model = self.get_unet_model(model_id="runwayml/stable-diffusion-inpainting")
        latents = self.get_latents(seed, shape=(4, 9, 64, 64))
        encoder_hidden_states = self.get_encoder_hidden_states(seed)

        timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)

        with torch.no_grad():
            sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample

        assert sample.shape == (4, 4, 64, 64)

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [-0.1047, -1.7227, 0.1067, 0.0164, -0.5698, -0.4172, -0.1388, 1.1387]],
            [17, 0.55, [0.0975, -0.2856, -0.3508, -0.4600, 0.3376, 0.2930, -0.2747, -0.7026]],
            [8, 0.89, [-0.0952, 0.0183, -0.5825, -0.1981, 0.1131, 0.4668, -0.0395, -0.3486]],
            [3, 1000, [0.4790, 0.4949, -1.0732, -0.7158, 0.7959, -0.9478, 0.1105, -0.9741]],
            # fmt: on
        ]
    )
    @require_torch_accelerator_with_fp16
    def test_compvis_sd_inpaint_fp16(self, seed, timestep, expected_slice):
        model = self.get_unet_model(model_id="runwayml/stable-diffusion-inpainting", fp16=True)
        latents = self.get_latents(seed, shape=(4, 9, 64, 64), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, fp16=True)

        timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)

        with torch.no_grad():
            sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample

        assert sample.shape == (4, 4, 64, 64)

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)

    @parameterized.expand(
        [
            # fmt: off
            [83, 4, [0.1514, 0.0807, 0.1624, 0.1016, -0.1896, 0.0263, 0.0677, 0.2310]],
            [17, 0.55, [0.1164, -0.0216, 0.0170, 0.1589, -0.3120, 0.1005, -0.0581, -0.1458]],
            [8, 0.89, [-0.1758, -0.0169, 0.1004, -0.1411, 0.1312, 0.1103, -0.1996, 0.2139]],
            [3, 1000, [0.1214, 0.0352, -0.0731, -0.1562, -0.0994, -0.0906, -0.2340, -0.0539]],
            # fmt: on
        ]
    )
    @require_torch_accelerator_with_fp16
    def test_stabilityai_sd_v2_fp16(self, seed, timestep, expected_slice):
        model = self.get_unet_model(model_id="stabilityai/stable-diffusion-2", fp16=True)
        latents = self.get_latents(seed, shape=(4, 4, 96, 96), fp16=True)
        encoder_hidden_states = self.get_encoder_hidden_states(seed, shape=(4, 77, 1024), fp16=True)

        timestep = torch.tensor([timestep], dtype=torch.long, device=torch_device)

        with torch.no_grad():
            sample = model(latents, timestep=timestep, encoder_hidden_states=encoder_hidden_states).sample

        assert sample.shape == latents.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)