# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import os
import random
import tempfile
import unittest

import numpy as np
import torch

import PIL
from diffusers import (
    AutoencoderKL,
    DDIMPipeline,
    DDIMScheduler,
    DDPMPipeline,
    DDPMScheduler,
    KarrasVePipeline,
    KarrasVeScheduler,
    LDMPipeline,
    LDMTextToImagePipeline,
    LMSDiscreteScheduler,
    PNDMPipeline,
    PNDMScheduler,
    ScoreSdeVePipeline,
    ScoreSdeVeScheduler,
    StableDiffusionImg2ImgPipeline,
    StableDiffusionInpaintPipeline,
    StableDiffusionOnnxPipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
    UNet2DModel,
    VQModel,
)
from diffusers.modeling_utils import WEIGHTS_NAME
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
from diffusers.testing_utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils import CONFIG_NAME
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer


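# run matmuls in full fp32 precision (no TF32) so the hard-coded expected slices
# below stay reproducible across GPU generations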
torch.backends.cuda.matmul.allow_tf32 = False


def test_progress_bar(capsys):
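    # tqdm writes its progress bar to stderr, so a 10-step DDPM run should leave
    # "10/10" in captured stderr unless the bar is explicitly disabled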
    model = UNet2DModel(
        block_out_channels=(32, 64),
        layers_per_block=2,
        sample_size=32,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    scheduler = DDPMScheduler(num_train_timesteps=10)

    ddpm = DDPMPipeline(model, scheduler).to(torch_device)
    ddpm(output_type="numpy").images
    captured = capsys.readouterr()
    assert "10/10" in captured.err, "Progress bar has to be displayed"

    ddpm.set_progress_bar_config(disable=True)
    ddpm(output_type="numpy").images
    captured = capsys.readouterr()
    assert captured.err == "", "Progress bar should be disabled"


class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
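        # a fixed pseudo-random (batch, channels, height, width) image tensor used as
        # the img2img / inpaint input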
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_safety_checker(self):
        def check(images, *args, **kwargs):
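            # no-op stand-in for the safety checker: pass images through and flag none as NSFW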
            return images, [False] * len(images)

        return check

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
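            # minimal stand-in for a feature extractor: return an object exposing `pixel_values`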
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values = self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_ddim(self):
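        # run the pipeline twice from the same seed: once returning an output object and
        # once a plain tuple (return_dict=False); both must match the expected slice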
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler(tensor_format="pt")

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        # Warmup pass when using mps (see #372)
        if torch_device == "mps":
            _ = ddpm(num_inference_steps=1)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ddpm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance

    def test_pndm_cifar10(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler(tensor_format="pt")

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_ldm_text2img(self):
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(tensor_format="pt")
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        ldm = LDMTextToImagePipeline(vqvae=vae, bert=bert, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        # Warmup pass when using mps (see #372)
        if torch_device == "mps":
            generator = torch.manual_seed(0)
            _ = ldm([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=1, output_type="numpy")[
                "sample"
            ]

        generator = torch.manual_seed(0)
        image = ldm([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="numpy")[
            "sample"
        ]

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="numpy",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5074, 0.5026, 0.4998, 0.4056, 0.3523, 0.4649, 0.5289, 0.5299, 0.4897])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5112, 0.4692, 0.4715, 0.5206, 0.4894, 0.5114, 0.5096, 0.4932, 0.4755])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(tensor_format="pt", skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.4937, 0.4649, 0.4716, 0.5145, 0.4889, 0.513, 0.513, 0.4905, 0.4738])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_k_lms(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5067, 0.4689, 0.4614, 0.5233, 0.4903, 0.5112, 0.524, 0.5069, 0.4785])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_attention_chunk(self):
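        # attention slicing trades speed for memory; the sliced and unsliced runs from the
        # same seed must produce numerically (almost) identical images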
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output_1 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        # make sure chunking the attention yields the same result
        sd_pipe.enable_attention_slicing(slice_size=1)
        generator = torch.Generator(device=device).manual_seed(0)
        output_2 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 1e-4

    def test_score_sde_ve_pipeline(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler(tensor_format="pt")

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_ldm_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler(tensor_format="pt")
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        # Warmup pass when using mps (see #372)
        if torch_device == "mps":
            generator = torch.manual_seed(0)
            _ = ldm(generator=generator, num_inference_steps=1, output_type="numpy").images

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_karras_ve_pipeline(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler(tensor_format="pt")

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_img2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(tensor_format="pt", skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        init_image = self.dummy_image.to(device)

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4492, 0.3865, 0.4222, 0.5854, 0.5139, 0.4379, 0.4193, 0.48, 0.4218])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_img2img_k_lms(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        init_image = self.dummy_image.to(device)

        sd_pipe = StableDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            return_dict=False,
        )
        image_from_tuple = output[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4367, 0.4986, 0.4372, 0.6706, 0.5665, 0.444, 0.5864, 0.6019, 0.5203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(tensor_format="pt", skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB")
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((128, 128))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionInpaintPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            mask_image=mask_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            mask_image=mask_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4731, 0.5346, 0.4531, 0.6251, 0.5446, 0.4057, 0.5527, 0.5896, 0.5153])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


class PipelineTesterMixin(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_smart_download(self):
        model_id = "hf-internal-testing/unet-pipeline-dummy"
        with tempfile.TemporaryDirectory() as tmpdirname:
            _ = DiffusionPipeline.from_pretrained(model_id, cache_dir=tmpdirname, force_download=True)
            local_repo_name = "--".join(["models"] + model_id.split("/"))
            snapshot_dir = os.path.join(tmpdirname, local_repo_name, "snapshots")
            snapshot_dir = os.path.join(snapshot_dir, os.listdir(snapshot_dir)[0])

            # inspect all downloaded files to make sure that everything is included
            assert os.path.isfile(os.path.join(snapshot_dir, DiffusionPipeline.config_name))
            assert os.path.isfile(os.path.join(snapshot_dir, CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, SCHEDULER_CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, WEIGHTS_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, "scheduler", SCHEDULER_CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, "unet", WEIGHTS_NAME))
            # make sure the super large numpy file:
            # https://huggingface.co/hf-internal-testing/unet-pipeline-dummy/blob/main/big_array.npy
            # is not downloaded, while all the expected files are
            assert not os.path.isfile(os.path.join(snapshot_dir, "big_array.npy"))

    @property
    def dummy_safety_checker(self):
        def check(images, *args, **kwargs):
            return images, [False] * len(images)

        return check

    def test_from_pretrained_save_pretrained(self):
        # 1. Load models
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDPMScheduler(num_train_timesteps=10)

        ddpm = DDPMPipeline(model, scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            ddpm.save_pretrained(tmpdirname)
            new_ddpm = DDPMPipeline.from_pretrained(tmpdirname)
            new_ddpm.to(torch_device)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        generator = generator.manual_seed(0)
        new_image = new_ddpm(generator=generator, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"

    @slow
    def test_from_pretrained_hub(self):
        model_path = "google/ddpm-cifar10-32"

        scheduler = DDPMScheduler(num_train_timesteps=10)

        ddpm = DDPMPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm_from_hub.to(torch_device)
        ddpm_from_hub.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        generator = generator.manual_seed(0)
        new_image = ddpm_from_hub(generator=generator, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"

    @slow
    def test_from_pretrained_hub_pass_model(self):
        model_path = "google/ddpm-cifar10-32"

        scheduler = DDPMScheduler(num_train_timesteps=10)

        # pass unet into DiffusionPipeline
        unet = UNet2DModel.from_pretrained(model_path)
        ddpm_from_hub_custom_model = DiffusionPipeline.from_pretrained(model_path, unet=unet, scheduler=scheduler)
        ddpm_from_hub_custom_model.to(torch_device)
        ddpm_from_hub_custom_model.set_progress_bar_config(disable=None)

        ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm_from_hub.to(torch_device)
        ddpm_from_hub.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm_from_hub_custom_model(generator=generator, output_type="numpy").images

        generator = generator.manual_seed(0)
        new_image = ddpm_from_hub(generator=generator, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"

    @slow
    def test_output_format(self):
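        # `output_type` switches the pipeline output between numpy arrays and PIL images,
        # with PIL being the default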
        model_path = "google/ddpm-cifar10-32"

        pipe = DDIMPipeline.from_pretrained(model_path)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        images = pipe(generator=generator, output_type="numpy").images
        assert images.shape == (1, 32, 32, 3)
        assert isinstance(images, np.ndarray)

        images = pipe(generator=generator, output_type="pil").images
        assert isinstance(images, list)
        assert len(images) == 1
        assert isinstance(images[0], PIL.Image.Image)

        # use PIL by default
        images = pipe(generator=generator).images
        assert isinstance(images, list)
        assert isinstance(images[0], PIL.Image.Image)

    @slow
    def test_ddpm_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDPMScheduler.from_config(model_id)
        scheduler = scheduler.set_format("pt")

        ddpm = DDPMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.41995, 0.35885, 0.19385, 0.38475, 0.3382, 0.2647, 0.41545, 0.3582, 0.33845])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_ddim_lsun(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_config(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.00605, 0.0201, 0.0344, 0.00235, 0.00185, 0.00025, 0.00215, 0.0, 0.00685])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_ddim_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler(tensor_format="pt")

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.17235, 0.16175, 0.16005, 0.16255, 0.1497, 0.1513, 0.15045, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_pndm_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler(tensor_format="pt")

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_ldm_text2img(self):
        ldm = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = ldm([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="numpy")[
            "sample"
        ]

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.9256, 0.9340, 0.8933, 0.9361, 0.9113, 0.8727, 0.9122, 0.8745, 0.8099])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_ldm_text2img_fast(self):
        ldm = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = ldm(prompt, generator=generator, num_inference_steps=1, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.3163, 0.8670, 0.6465, 0.1865, 0.6291, 0.5139, 0.2824, 0.3723, 0.4344])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion(self):
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1", use_auth_token=True)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=torch_device).manual_seed(0)
        with torch.autocast("cuda"):
            output = sd_pipe(
                [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np"
            )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.8887, 0.915, 0.91, 0.894, 0.909, 0.912, 0.919, 0.925, 0.883])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_fast_ddim(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1", use_auth_token=True)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        sd_pipe.scheduler = scheduler

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=torch_device).manual_seed(0)

        with torch.autocast("cuda"):
            output = sd_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.9326, 0.923, 0.951, 0.9365, 0.9214, 0.951, 0.9365, 0.9414, 0.918])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_score_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_config(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)

        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_ldm_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_ddpm_ddim_equality(self):
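        # with eta=1.0 and the full 1000 steps, DDIM sampling reduces to ancestral DDPM
        # sampling, so both pipelines should produce nearly the same image from one seed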
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        ddpm_scheduler = DDPMScheduler(tensor_format="pt")
        ddim_scheduler = DDIMScheduler(tensor_format="pt")

        ddpm = DDPMPipeline(unet=unet, scheduler=ddpm_scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        ddim = DDIMPipeline(unet=unet, scheduler=ddim_scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        ddpm_image = ddpm(generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        ddim_image = ddim(generator=generator, num_inference_steps=1000, eta=1.0, output_type="numpy").images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(ddpm_image - ddim_image).max() < 1e-1

    @unittest.skip("(Anton) The test is failing for large batch sizes, needs investigation")
    def test_ddpm_ddim_equality_batched(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        ddpm_scheduler = DDPMScheduler(tensor_format="pt")
        ddim_scheduler = DDIMScheduler(tensor_format="pt")

        ddpm = DDPMPipeline(unet=unet, scheduler=ddpm_scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        ddim = DDIMPipeline(unet=unet, scheduler=ddim_scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        ddpm_images = ddpm(batch_size=4, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        ddim_images = ddim(batch_size=4, generator=generator, num_inference_steps=1000, eta=1.0, output_type="numpy")[
            "sample"
        ]

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(ddpm_images - ddim_images).max() < 1e-1

    @slow
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler(tensor_format="pt")

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.26815, 0.1581, 0.2658, 0.23248, 0.1550, 0.2539, 0.1131, 0.1024, 0.0837])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_lms_stable_diffusion_pipeline(self):
        model_id = "CompVis/stable-diffusion-v1-1"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=True).to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        scheduler = LMSDiscreteScheduler.from_config(model_id, subfolder="scheduler", use_auth_token=True)
        pipe.scheduler = scheduler

        prompt = "a photograph of an astronaut riding a horse"
        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = pipe([prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy")[
            "sample"
        ]

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.9077, 0.9254, 0.9181, 0.9227, 0.9213, 0.9367, 0.9399, 0.9406, 0.9024])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_memory_chunking(self):
        torch.cuda.reset_peak_memory_stats()
        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionPipeline.from_pretrained(
            model_id, revision="fp16", torch_dtype=torch.float16, use_auth_token=True
        ).to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "a photograph of an astronaut riding a horse"

        # make attention efficient
        pipe.enable_attention_slicing()
        generator = torch.Generator(device=torch_device).manual_seed(0)
        with torch.autocast(torch_device):
            output_chunked = pipe(
                [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy"
            )
            image_chunked = output_chunked.images

        mem_bytes = torch.cuda.max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        # make sure that less than 3.75 GB is allocated
        assert mem_bytes < 3.75 * 10**9

        # disable chunking
        pipe.disable_attention_slicing()
        generator = torch.Generator(device=torch_device).manual_seed(0)
        with torch.autocast(torch_device):
            output = pipe(
                [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy"
            )
            image = output.images

        # make sure that more than 3.75 GB is allocated
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes > 3.75 * 10**9
        assert np.abs(image_chunked.flatten() - image.flatten()).max() < 1e-3

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_text2img_pipeline(self):
        expected_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/text2img/astronaut_riding_a_horse.png"
        )
        expected_image = np.array(expected_image, dtype=np.float32) / 255.0

        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionPipeline.from_pretrained(
            model_id,
            safety_checker=self.dummy_safety_checker,
            use_auth_token=True,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "astronaut riding a horse"

        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipe(prompt=prompt, guidance_scale=7.5, generator=generator, output_type="np")
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_img2img_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        expected_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/fantasy_landscape.png"
        )
        init_image = init_image.resize((768, 512))
        expected_image = np.array(expected_image, dtype=np.float32) / 255.0

        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=self.dummy_safety_checker,
            use_auth_token=True,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipe(
            prompt=prompt,
            init_image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).mean() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_img2img_pipeline_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        expected_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/fantasy_landscape_k_lms.png"
        )
        init_image = init_image.resize((768, 512))
        expected_image = np.array(expected_image, dtype=np.float32) / 255.0

        lms = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")

        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            scheduler=lms,
            safety_checker=self.dummy_safety_checker,
            use_auth_token=True,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipe(
            prompt=prompt,
            init_image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).mean() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench.png"
        )
        expected_image = np.array(expected_image, dtype=np.float32) / 255.0

        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=self.dummy_safety_checker,
            use_auth_token=True,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A red cat sitting on a park bench"

        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipe(
            prompt=prompt,
            init_image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2

    @slow
    def test_stable_diffusion_onnx(self):
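        # export the PyTorch pipeline to ONNX, then run inference through onnxruntime's
        # CUDA execution provider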
        from scripts.convert_stable_diffusion_checkpoint_to_onnx import convert_models

        with tempfile.TemporaryDirectory() as tmpdirname:
            convert_models("CompVis/stable-diffusion-v1-4", tmpdirname, opset=14)

            sd_pipe = StableDiffusionOnnxPipeline.from_pretrained(tmpdirname, provider="CUDAExecutionProvider")

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0385, 0.0252, 0.0234, 0.0287, 0.0358, 0.0287, 0.0276, 0.0235, 0.0010])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3