# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import os
import random
import tempfile
import unittest

import numpy as np
import torch

import PIL
from diffusers import (
    AutoencoderKL,
    DDIMPipeline,
    DDIMScheduler,
    DDPMPipeline,
    DDPMScheduler,
    KarrasVePipeline,
    KarrasVeScheduler,
    LDMPipeline,
    LDMTextToImagePipeline,
    LMSDiscreteScheduler,
    PNDMPipeline,
    PNDMScheduler,
    ScoreSdeVePipeline,
    ScoreSdeVeScheduler,
    StableDiffusionImg2ImgPipeline,
    StableDiffusionInpaintPipeline,
    StableDiffusionOnnxPipeline,
    StableDiffusionPipeline,
    UNet2DConditionModel,
    UNet2DModel,
    VQModel,
)
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
from diffusers.utils import CONFIG_NAME, WEIGHTS_NAME, floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import get_tests_dir
from PIL import Image
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextConfig, CLIPTextModel, CLIPTokenizer


torch.backends.cuda.matmul.allow_tf32 = False
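# NOTE: TF32 is disabled (above) so that matmuls on Ampere GPUs run in full fp32
# precision and the hard-coded expected image slices in the tests below stay reproducible.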


def test_progress_bar(capsys):
    model = UNet2DModel(
        block_out_channels=(32, 64),
        layers_per_block=2,
        sample_size=32,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    scheduler = DDPMScheduler(num_train_timesteps=10)

    ddpm = DDPMPipeline(model, scheduler).to(torch_device)
    ddpm(output_type="numpy").images
    captured = capsys.readouterr()
    assert "10/10" in captured.err, "Progress bar has to be displayed"

    ddpm.set_progress_bar_config(disable=True)
    ddpm(output_type="numpy").images
    captured = capsys.readouterr()
    assert captured.err == "", "Progress bar should be disabled"


class CustomPipelineTests(unittest.TestCase):
    def test_load_custom_pipeline(self):
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
        )
        # NOTE that `"CustomPipeline"` is not a class that is defined in this library, but solely on the Hub
        # under https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L24
        assert pipeline.__class__.__name__ == "CustomPipeline"

    def test_run_custom_pipeline(self):
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
        )
        images, output_str = pipeline(num_inference_steps=2, output_type="np")

        assert images[0].shape == (1, 32, 32, 3)
        # compare output to https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L102
        assert output_str == "This is a test"

    def test_local_custom_pipeline(self):
        local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline")
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path
        )
        images, output_str = pipeline(num_inference_steps=2, output_type="np")

        assert pipeline.__class__.__name__ == "CustomLocalPipeline"
        assert images[0].shape == (1, 32, 32, 3)
        # compare to https://github.com/huggingface/diffusers/blob/main/tests/fixtures/custom_pipeline/pipeline.py#L102
        assert output_str == "This is a local test"

    @slow
    def test_load_pipeline_from_git(self):
        clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"

        feature_extractor = CLIPFeatureExtractor.from_pretrained(clip_model_id)
        clip_model = CLIPModel.from_pretrained(clip_model_id)

        pipeline = DiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            custom_pipeline="clip_guided_stable_diffusion",
            clip_model=clip_model,
            feature_extractor=feature_extractor,
        )
        pipeline = pipeline.to(torch_device)

        # NOTE that `"CLIPGuidedStableDiffusion"` is not a class that is defined in the PyPI package of the library, but solely in the community examples folder of GitHub under:
        # https://github.com/huggingface/diffusers/blob/main/examples/community/clip_guided_stable_diffusion.py
        assert pipeline.__class__.__name__ == "CLIPGuidedStableDiffusion"

        image = pipeline("a prompt", num_inference_steps=2, output_type="np").images[0]
        assert image.shape == (512, 512, 3)
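
    # For reference, a `custom_pipeline` repo only needs a `pipeline.py` module that
    # exposes a DiffusionPipeline subclass, e.g. (minimal sketch, names illustrative):
    #
    #   class MyPipeline(DiffusionPipeline):
    #       def __init__(self, unet, scheduler):
    #           super().__init__()
    #           self.register_modules(unet=unet, scheduler=scheduler)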


class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
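        # deterministic 1x3x32x32 float image tensor (values in [0, 1)) with a fixed
        # RNG seed, so image-conditioned tests are reproducible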
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=3,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_safety_checker(self):
        def check(images, *args, **kwargs):
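            # stand-in for the StableDiffusionSafetyChecker interface: pass the images
            # through unchanged and flag every image as safe (has_nsfw_concept=False)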
            return images, [False] * len(images)

        return check

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
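            # minimal stand-in for a CLIPFeatureExtractor: returns an object exposing
            # an (empty) `pixel_values` tensor and a chainable `.to(device)`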
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values = self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_pipeline_fp16_cpu_error(self):
        model = self.dummy_uncond_unet
        scheduler = DDPMScheduler(num_train_timesteps=10)
        pipe = DDIMPipeline(model.half(), scheduler)

        if str(torch_device) in ["cpu", "mps"]:
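            # half-precision weights are not usable for inference on CPU/MPS, so
            # moving an fp16 pipeline there is expected to raise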
            self.assertRaises(ValueError, pipe.to, torch_device)
        else:
            # moving the pipeline to GPU should work
            pipe.to(torch_device)

    def test_ddim(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        # Warmup pass when using mps (see #372)
        if torch_device == "mps":
            _ = ddpm(num_inference_steps=1)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ddpm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array(
            [1.000e00, 5.717e-01, 4.717e-01, 1.000e00, 0.000e00, 1.000e00, 3.000e-04, 0.000e00, 9.000e-04]
        )

        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance

    def test_pndm_cifar10(self):
        unet = self.dummy_uncond_unet
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pndm(generator=generator, num_inference_steps=20, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pndm(generator=generator, num_inference_steps=20, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([1.0, 1.0, 0.0, 1.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_ldm_text2img(self):
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        ldm = LDMTextToImagePipeline(vqvae=vae, bert=bert, tokenizer=tokenizer, unet=unet, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        # Warmup pass when using mps (see #372)
        if torch_device == "mps":
            generator = torch.manual_seed(0)
            _ = ldm([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=1, output_type="numpy")[
                "sample"
            ]

        generator = torch.manual_seed(0)
        image = ldm([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="numpy")[
            "sample"
        ]

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="numpy",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.5074, 0.5026, 0.4998, 0.4056, 0.3523, 0.4649, 0.5289, 0.5299, 0.4897])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5112, 0.4692, 0.4715, 0.5206, 0.4894, 0.5114, 0.5096, 0.4932, 0.4755])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_ddim_factor_8(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            height=536,
            width=536,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 134, 134, 3)
        expected_slice = np.array([0.7834, 0.5488, 0.5781, 0.46, 0.3609, 0.5369, 0.542, 0.4855, 0.5557])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.4937, 0.4649, 0.4716, 0.5145, 0.4889, 0.513, 0.513, 0.4905, 0.4738])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_k_lms(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.5067, 0.4689, 0.4614, 0.5233, 0.4903, 0.5112, 0.524, 0.5069, 0.4785])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_attention_chunk(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output_1 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        # make sure chunking the attention yields the same result
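        # slice_size=1 computes attention one slice at a time, trading speed for
        # memory; the result should match the unsliced path up to numerical noise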
        sd_pipe.enable_attention_slicing(slice_size=1)
        generator = torch.Generator(device=device).manual_seed(0)
        output_2 = sd_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=2, output_type="np")

        assert np.abs(output_2.images.flatten() - output_1.images.flatten()).max() < 1e-4

    def test_score_sde_ve_pipeline(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()

        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_ldm_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        # Warmup pass when using mps (see #372)
        if torch_device == "mps":
            generator = torch.manual_seed(0)
            _ = ldm(generator=generator, num_inference_steps=1, output_type="numpy").images

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_karras_ve_pipeline(self):
        unet = self.dummy_uncond_unet
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = pipe(num_inference_steps=2, generator=generator, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_img2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        init_image = self.dummy_image.to(device)

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4492, 0.3865, 0.4222, 0.5854, 0.5139, 0.4379, 0.4193, 0.48, 0.4218])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_img2img_k_lms(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")

        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        init_image = self.dummy_image.to(device)

        sd_pipe = StableDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            return_dict=False,
        )
        image_from_tuple = output[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4367, 0.4986, 0.4372, 0.6706, 0.5665, 0.444, 0.5864, 0.6019, 0.5203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB")
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((128, 128))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionInpaintPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            mask_image=mask_image,
        )

        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            mask_image=mask_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4731, 0.5346, 0.4531, 0.6251, 0.5446, 0.4057, 0.5527, 0.5896, 0.5153])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2

    def test_stable_diffusion_num_images_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        # test num_images_per_prompt=1 (default)
        images = sd_pipe(prompt, num_inference_steps=2, output_type="np").images

        assert images.shape == (1, 128, 128, 3)

        # test num_images_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        images = sd_pipe([prompt] * batch_size, num_inference_steps=2, output_type="np").images

        assert images.shape == (batch_size, 128, 128, 3)

        # test num_images_per_prompt for single prompt
        num_images_per_prompt = 2
        images = sd_pipe(
            prompt, num_inference_steps=2, output_type="np", num_images_per_prompt=num_images_per_prompt
        ).images

        assert images.shape == (num_images_per_prompt, 128, 128, 3)

        # test num_images_per_prompt for batch of prompts
        batch_size = 2
        images = sd_pipe(
            [prompt] * batch_size, num_inference_steps=2, output_type="np", num_images_per_prompt=num_images_per_prompt
        ).images

        assert images.shape == (batch_size * num_images_per_prompt, 128, 128, 3)

    def test_stable_diffusion_img2img_num_images_per_prompt(self):
        device = "cpu"
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        init_image = self.dummy_image.to(device)

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionImg2ImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        # test num_images_per_prompt=1 (default)
        images = sd_pipe(
            prompt,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
        ).images

        assert images.shape == (1, 32, 32, 3)

        # test num_images_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        images = sd_pipe(
            [prompt] * batch_size,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
        ).images

        assert images.shape == (batch_size, 32, 32, 3)

        # test num_images_per_prompt for single prompt
        num_images_per_prompt = 2
        images = sd_pipe(
            prompt,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            num_images_per_prompt=num_images_per_prompt,
        ).images

        assert images.shape == (num_images_per_prompt, 32, 32, 3)

        # test num_images_per_prompt for batch of prompts
        batch_size = 2
        images = sd_pipe(
            [prompt] * batch_size,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            num_images_per_prompt=num_images_per_prompt,
        ).images

        assert images.shape == (batch_size * num_images_per_prompt, 32, 32, 3)

    def test_stable_diffusion_inpaint_num_images_per_prompt(self):
        device = "cpu"
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB")
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((128, 128))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionInpaintPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=self.dummy_safety_checker,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"

        # test num_images_per_prompt=1 (default)
        images = sd_pipe(
            prompt,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            mask_image=mask_image,
        ).images

        assert images.shape == (1, 32, 32, 3)

        # test num_images_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        images = sd_pipe(
            [prompt] * batch_size,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            mask_image=mask_image,
        ).images

        assert images.shape == (batch_size, 32, 32, 3)

        # test num_images_per_prompt for single prompt
        num_images_per_prompt = 2
        images = sd_pipe(
            prompt,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            mask_image=mask_image,
            num_images_per_prompt=num_images_per_prompt,
        ).images

        assert images.shape == (num_images_per_prompt, 32, 32, 3)

        # test num_images_per_prompt for batch of prompts
        batch_size = 2
        images = sd_pipe(
            [prompt] * batch_size,
            num_inference_steps=2,
            output_type="np",
            init_image=init_image,
            mask_image=mask_image,
            num_images_per_prompt=num_images_per_prompt,
        ).images

        assert images.shape == (batch_size * num_images_per_prompt, 32, 32, 3)


class PipelineTesterMixin(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_smart_download(self):
        model_id = "hf-internal-testing/unet-pipeline-dummy"
        with tempfile.TemporaryDirectory() as tmpdirname:
            _ = DiffusionPipeline.from_pretrained(model_id, cache_dir=tmpdirname, force_download=True)
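            # huggingface_hub caches downloads under <cache>/models--{org}--{name}/snapshots/<revision>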
            local_repo_name = "--".join(["models"] + model_id.split("/"))
            snapshot_dir = os.path.join(tmpdirname, local_repo_name, "snapshots")
            snapshot_dir = os.path.join(snapshot_dir, os.listdir(snapshot_dir)[0])

            # inspect all downloaded files to make sure that everything is included
            assert os.path.isfile(os.path.join(snapshot_dir, DiffusionPipeline.config_name))
            assert os.path.isfile(os.path.join(snapshot_dir, CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, SCHEDULER_CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, WEIGHTS_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, "scheduler", SCHEDULER_CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, "unet", WEIGHTS_NAME))
            # make sure the super large numpy file:
            # https://huggingface.co/hf-internal-testing/unet-pipeline-dummy/blob/main/big_array.npy
            # is not downloaded, while all the expected files are
            assert not os.path.isfile(os.path.join(snapshot_dir, "big_array.npy"))

    @property
    def dummy_safety_checker(self):
        def check(images, *args, **kwargs):
            return images, [False] * len(images)

        return check

    def test_from_pretrained_save_pretrained(self):
        # 1. Load models
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDPMScheduler(num_train_timesteps=10)

        ddpm = DDPMPipeline(model, scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            ddpm.save_pretrained(tmpdirname)
            new_ddpm = DDPMPipeline.from_pretrained(tmpdirname)
            new_ddpm.to(torch_device)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        generator = generator.manual_seed(0)
        new_image = new_ddpm(generator=generator, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"

    @slow
    def test_from_pretrained_hub(self):
        model_path = "google/ddpm-cifar10-32"

        scheduler = DDPMScheduler(num_train_timesteps=10)

        ddpm = DDPMPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm_from_hub.to(torch_device)
        ddpm_from_hub.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        generator = generator.manual_seed(0)
        new_image = ddpm_from_hub(generator=generator, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"

    @slow
    def test_from_pretrained_hub_pass_model(self):
        model_path = "google/ddpm-cifar10-32"

        scheduler = DDPMScheduler(num_train_timesteps=10)

        # pass unet into DiffusionPipeline
        unet = UNet2DModel.from_pretrained(model_path)
        ddpm_from_hub_custom_model = DiffusionPipeline.from_pretrained(model_path, unet=unet, scheduler=scheduler)
        ddpm_from_hub_custom_model.to(torch_device)
        ddpm_from_hub_custom_model.set_progress_bar_config(disable=None)

        ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm_from_hub.to(torch_device)
        ddpm_from_hub.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm_from_hub_custom_model(generator=generator, output_type="numpy").images

        generator = generator.manual_seed(0)
        new_image = ddpm_from_hub(generator=generator, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"

    @slow
    def test_output_format(self):
        model_path = "google/ddpm-cifar10-32"

        pipe = DDIMPipeline.from_pretrained(model_path)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        images = pipe(generator=generator, output_type="numpy").images

        assert images.shape == (1, 32, 32, 3)
        assert isinstance(images, np.ndarray)

        images = pipe(generator=generator, output_type="pil").images

        assert isinstance(images, list)
        assert len(images) == 1
        assert isinstance(images[0], PIL.Image.Image)

        # use PIL by default
        images = pipe(generator=generator).images

        assert isinstance(images, list)
        assert isinstance(images[0], PIL.Image.Image)

    @slow
    def test_ddpm_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDPMScheduler.from_config(model_id)

        ddpm = DDPMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.41995, 0.35885, 0.19385, 0.38475, 0.3382, 0.2647, 0.41545, 0.3582, 0.33845])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_ddim_lsun(self):
        model_id = "google/ddpm-ema-bedroom-256"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler.from_config(model_id)

        ddpm = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddpm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.00605, 0.0201, 0.0344, 0.00235, 0.00185, 0.00025, 0.00215, 0.0, 0.00685])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_ddim_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = DDIMScheduler()

        ddim = DDIMPipeline(unet=unet, scheduler=scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ddim(generator=generator, eta=0.0, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.17235, 0.16175, 0.16005, 0.16255, 0.1497, 0.1513, 0.15045, 0.1442, 0.1453])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_pndm_cifar10(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        scheduler = PNDMScheduler()

        pndm = PNDMPipeline(unet=unet, scheduler=scheduler)
        pndm.to(torch_device)
        pndm.set_progress_bar_config(disable=None)
        generator = torch.manual_seed(0)
        image = pndm(generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.1564, 0.14645, 0.1406, 0.14715, 0.12425, 0.14045, 0.13115, 0.12175, 0.125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_ldm_text2img(self):
        ldm = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = ldm([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="numpy")[
            "sample"
        ]

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.9256, 0.9340, 0.8933, 0.9361, 0.9113, 0.8727, 0.9122, 0.8745, 0.8099])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_ldm_text2img_fast(self):
        ldm = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = ldm(prompt, generator=generator, num_inference_steps=1, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.3163, 0.8670, 0.6465, 0.1865, 0.6291, 0.5139, 0.2824, 0.3723, 0.4344])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion(self):
        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=torch_device).manual_seed(0)
        with torch.autocast("cuda"):
            output = sd_pipe(
                [prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np"
            )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.8887, 0.915, 0.91, 0.894, 0.909, 0.912, 0.919, 0.925, 0.883])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_fast_ddim(self):
        sd_pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-1")
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        sd_pipe.scheduler = scheduler
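        # NOTE: a compatible scheduler can simply be assigned after loading; pipeline
        # components are interchangeable as long as their configs line up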

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=torch_device).manual_seed(0)

        with torch.autocast("cuda"):
            output = sd_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.9326, 0.923, 0.951, 0.9365, 0.9214, 0.951, 0.9365, 0.9414, 0.918])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_score_sde_ve_pipeline(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)

        scheduler = ScoreSdeVeScheduler.from_config(model_id)

        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)

        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_ldm_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
1338
        ldm.to(torch_device)
1339
        ldm.set_progress_bar_config(disable=None)
1340
1341

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    def test_ddpm_ddim_equality(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        ddpm_scheduler = DDPMScheduler()
        ddim_scheduler = DDIMScheduler()

        ddpm = DDPMPipeline(unet=unet, scheduler=ddpm_scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        ddim = DDIMPipeline(unet=unet, scheduler=ddim_scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        ddpm_image = ddpm(generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
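        # eta=1.0 makes DDIM sampling fully stochastic, so with the full 1000 steps
        # it should reproduce DDPM up to numerical noise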
        ddim_image = ddim(generator=generator, num_inference_steps=1000, eta=1.0, output_type="numpy").images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(ddpm_image - ddim_image).max() < 1e-1

    @unittest.skip("(Anton) The test is failing for large batch sizes, needs investigation")
    def test_ddpm_ddim_equality_batched(self):
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        ddpm_scheduler = DDPMScheduler()
        ddim_scheduler = DDIMScheduler()

        ddpm = DDPMPipeline(unet=unet, scheduler=ddpm_scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        ddim = DDIMPipeline(unet=unet, scheduler=ddim_scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        ddpm_images = ddpm(batch_size=4, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        ddim_images = ddim(
            batch_size=4, generator=generator, num_inference_steps=1000, eta=1.0, output_type="numpy"
        ).images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(ddpm_images - ddim_images).max() < 1e-1

    @slow
    def test_karras_ve_pipeline(self):
        model_id = "google/ncsnpp-celebahq-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = KarrasVeScheduler()

        pipe = KarrasVePipeline(unet=model, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = pipe(num_inference_steps=20, generator=generator, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_lms_stable_diffusion_pipeline(self):
        model_id = "CompVis/stable-diffusion-v1-1"
        pipe = StableDiffusionPipeline.from_pretrained(model_id).to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        scheduler = LMSDiscreteScheduler.from_config(model_id, subfolder="scheduler")
        pipe.scheduler = scheduler

        prompt = "a photograph of an astronaut riding a horse"
        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy"
        ).images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.9077, 0.9254, 0.9181, 0.9227, 0.9213, 0.9367, 0.9399, 0.9406, 0.9024])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_memory_chunking(self):
        torch.cuda.reset_peak_memory_stats()
        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16).to(
            torch_device
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "a photograph of an astronaut riding a horse"

        # make attention efficient
        pipe.enable_attention_slicing()
        generator = torch.Generator(device=torch_device).manual_seed(0)
        with torch.autocast(torch_device):
            output_chunked = pipe(
                [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy"
            )
            image_chunked = output_chunked.images

        mem_bytes = torch.cuda.max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        # make sure that less than 3.75 GB is allocated
        assert mem_bytes < 3.75 * 10**9

        # disable chunking
        pipe.disable_attention_slicing()
        generator = torch.Generator(device=torch_device).manual_seed(0)
        with torch.autocast(torch_device):
            output = pipe(
                [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy"
            )
            image = output.images

        # make sure that more than 3.75 GB is allocated
        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes > 3.75 * 10**9
        assert np.abs(image_chunked.flatten() - image.flatten()).max() < 1e-3

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_text2img_pipeline_fp16(self):
        torch.cuda.reset_peak_memory_stats()
        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionPipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16).to(
            torch_device
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "a photograph of an astronaut riding a horse"

        generator = torch.Generator(device=torch_device).manual_seed(0)
        # first pass: plain fp16 weights without autocast
        output_fp16 = pipe(
            [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy"
        )
        image_fp16 = output_fp16.images

        # second pass: the same pipeline under autocast
        generator = torch.Generator(device=torch_device).manual_seed(0)
        with torch.autocast(torch_device):
            output = pipe(
                [prompt], generator=generator, guidance_scale=7.5, num_inference_steps=10, output_type="numpy"
            )
            image = output.images

        # Make sure results are close enough
        diff = np.abs(image_fp16.flatten() - image.flatten())
        # They ARE different, since ops are not always run at the same precision.
        # However, they should be extremely close.
        assert diff.mean() < 2e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_text2img_pipeline(self):
        expected_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/text2img/astronaut_riding_a_horse.png"
        )
        expected_image = np.array(expected_image, dtype=np.float32) / 255.0

        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionPipeline.from_pretrained(
            model_id,
            safety_checker=self.dummy_safety_checker,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "astronaut riding a horse"

        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipe(prompt=prompt, guidance_scale=7.5, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_img2img_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        expected_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/fantasy_landscape.png"
        )
        init_image = init_image.resize((768, 512))
        expected_image = np.array(expected_image, dtype=np.float32) / 255.0

        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            safety_checker=self.dummy_safety_checker,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipe(
            prompt=prompt,
            init_image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).mean() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_img2img_pipeline_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        expected_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/fantasy_landscape_k_lms.png"
        )
        init_image = init_image.resize((768, 512))
        expected_image = np.array(expected_image, dtype=np.float32) / 255.0

        lms = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")

        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            model_id,
            scheduler=lms,
            safety_checker=self.dummy_safety_checker,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipe(
            prompt=prompt,
            init_image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).mean() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench.png"
        )
        expected_image = np.array(expected_image, dtype=np.float32) / 255.0

        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=self.dummy_safety_checker,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A red cat sitting on a park bench"

        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipe(
            prompt=prompt,
            init_image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_inpaint_pipeline_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_k_lms.png"
        )
        expected_image = np.array(expected_image, dtype=np.float32) / 255.0

        lms = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear")

        model_id = "CompVis/stable-diffusion-v1-4"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            scheduler=lms,
            safety_checker=self.dummy_safety_checker,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A red cat sitting on a park bench"

        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipe(
            prompt=prompt,
            init_image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2

    @slow
    def test_stable_diffusion_onnx(self):
        sd_pipe = StableDiffusionOnnxPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
        )

        prompt = "A painting of a squirrel eating a burger"
        np.random.seed(0)
        output = sd_pipe([prompt], guidance_scale=6.0, num_inference_steps=5, output_type="np")
        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3602, 0.3688, 0.3652, 0.3895, 0.3782, 0.3747, 0.3927, 0.4241, 0.4327])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_text2img_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [1.8285, 1.2857, -0.1024, 1.2406, -2.3068, 1.0747, -0.0818, -0.6520, -2.9506]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = StableDiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "Andromeda galaxy in a bottle"

        generator = torch.Generator(device=torch_device).manual_seed(0)
        with torch.autocast(torch_device):
            pipe(
                prompt=prompt,
                num_inference_steps=50,
                guidance_scale=7.5,
                generator=generator,
                callback=test_callback_fn,
                callback_steps=1,
            )
        assert test_callback_fn.has_been_called
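        # the default PNDM scheduler repeats one warm-up step, so 50 inference steps
        # fire the callback 51 times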
        assert number_of_steps == 51

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_img2img_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 96)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([0.9052, -0.0184, 0.4810, 0.2898, 0.5851, 1.4920, 0.5362, 1.9838, 0.0530])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))

        pipe = StableDiffusionImg2ImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.Generator(device=torch_device).manual_seed(0)
        with torch.autocast(torch_device):
            pipe(
                prompt=prompt,
                init_image=init_image,
                strength=0.75,
                num_inference_steps=50,
                guidance_scale=7.5,
                generator=generator,
                callback=test_callback_fn,
                callback_steps=1,
            )
        assert test_callback_fn.has_been_called
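        # with strength=0.75 only ~75% of the 50 requested steps are actually run,
        # hence 38 callback calls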
        assert number_of_steps == 38

    @slow
    @unittest.skipIf(torch_device == "cpu", "Stable diffusion is supposed to run on GPU")
    def test_stable_diffusion_inpaint_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.5472, 1.1218, -0.5505, -0.9390, -1.0794, 0.4063, 0.5158, 0.6429, -1.5246]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )

        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="fp16", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A red cat sitting on a park bench"

        generator = torch.Generator(device=torch_device).manual_seed(0)
        with torch.autocast(torch_device):
            pipe(
                prompt=prompt,
                init_image=init_image,
                mask_image=mask_image,
                strength=0.75,
                num_inference_steps=50,
                guidance_scale=7.5,
                generator=generator,
                callback=test_callback_fn,
                callback_steps=1,
            )
        assert test_callback_fn.has_been_called
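        # same strength-based truncation as in the img2img test: 38 of 50 steps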
        assert number_of_steps == 38

    @slow
    def test_stable_diffusion_onnx_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: np.ndarray) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array(
                    [-0.5950, -0.3039, -1.1672, 0.1594, -1.1572, 0.6719, -1.9712, -0.0403, 0.9592]
                )
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3

        test_callback_fn.has_been_called = False

        pipe = StableDiffusionOnnxPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "Andromeda galaxy in a bottle"

        np.random.seed(0)
        pipe(prompt=prompt, num_inference_steps=5, guidance_scale=7.5, callback=test_callback_fn, callback_steps=1)
        assert test_callback_fn.has_been_called
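        # as with the other PNDM runs, one warm-up step is added: 5 inference steps give 6 callback calls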
        assert number_of_steps == 6