# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import json
import os
import random
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock

import numpy as np
import PIL
import requests_mock
import safetensors.torch
import torch
from parameterized import parameterized
from PIL import Image
from requests.exceptions import HTTPError
from transformers import CLIPImageProcessor, CLIPModel, CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMPipeline,
    DDIMScheduler,
    DDPMPipeline,
    DDPMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionImg2ImgPipeline,
    StableDiffusionInpaintPipelineLegacy,
    StableDiffusionPipeline,
    UNet2DConditionModel,
    UNet2DModel,
    UniPCMultistepScheduler,
    logging,
)
from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
from diffusers.utils import (
    CONFIG_NAME,
    WEIGHTS_NAME,
    floats_tensor,
    is_flax_available,
    nightly,
    require_torch_2,
    slow,
    torch_device,
)
from diffusers.utils.testing_utils import CaptureLogger, get_tests_dir, load_numpy, require_compel, require_torch_gpu


torch.backends.cuda.matmul.allow_tf32 = False


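# The download tests exercise `DiffusionPipeline.download` / `from_pretrained`
# against tiny Hub repos; `requests_mock` records the HTTP traffic so the exact
# number of HEAD/GET calls for a cold vs. warm cache can be asserted.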
class DownloadTests(unittest.TestCase):
    def test_one_request_upon_cached(self):
        # TODO: For some reason this test fails on MPS where no HEAD call is made.
        if torch_device == "mps":
            return

        with tempfile.TemporaryDirectory() as tmpdirname:
            with requests_mock.mock(real_http=True) as m:
                DiffusionPipeline.download(
                    "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
                )

            download_requests = [r.method for r in m.request_history]
            assert download_requests.count("HEAD") == 15, "15 calls to files"
            assert download_requests.count("GET") == 17, "15 calls to files + model_info + model_index.json"
            assert (
                len(download_requests) == 32
            ), "2 calls per file (15 files) + model_info and model_index.json"

            with requests_mock.mock(real_http=True) as m:
                DiffusionPipeline.download(
                    "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
                )

            cache_requests = [r.method for r in m.request_history]
            assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD"
            assert cache_requests.count("GET") == 1, "model info is only GET"
            assert (
                len(cache_requests) == 2
            ), "We should only HEAD `model_index.json` and GET `model_info` to check the commit hash"

    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            tmpdirname = DiffusionPipeline.download(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a flax file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack
            assert not any(f.endswith(".msgpack") for f in files)
            # We need to never convert this tiny model to safetensors for this test to pass
            assert not any(f.endswith(".safetensors") for f in files)

    def test_force_safetensors_error(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has no safetensors weights, so forcing safetensors must raise
            with self.assertRaises(EnvironmentError):
                tmpdirname = DiffusionPipeline.download(
                    "hf-internal-testing/tiny-stable-diffusion-pipe-no-safetensors",
                    safety_checker=None,
                    cache_dir=tmpdirname,
                    use_safetensors=True,
                )

    def test_returned_cached_folder(self):
        prompt = "hello"
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        _, local_path = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None, return_cached_folder=True
        )
        pipe_2 = StableDiffusionPipeline.from_pretrained(local_path)

        pipe = pipe.to(torch_device)
        pipe_2 = pipe_2.to(torch_device)

        generator = torch.manual_seed(0)
        out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

        generator = torch.manual_seed(0)
        out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

        assert np.max(np.abs(out - out_2)) < 1e-3

    def test_download_safetensors(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has safetensors weights alongside PyTorch ones
            tmpdirname = DiffusionPipeline.download(
                "hf-internal-testing/tiny-stable-diffusion-pipe-safetensors",
                safety_checker=None,
                cache_dir=tmpdirname,
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch file even though the repo
            # also ships PyTorch weights in its `unet` folder
            assert not any(f.endswith(".bin") for f in files)

    def test_download_no_safety_checker(self):
        prompt = "hello"
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe = pipe.to(torch_device)
        generator = torch.manual_seed(0)
        out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

        pipe_2 = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
        pipe_2 = pipe_2.to(torch_device)
        generator = torch.manual_seed(0)
        out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

        assert np.max(np.abs(out - out_2)) < 1e-3

    def test_load_no_safety_checker_explicit_locally(self):
        prompt = "hello"
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe = pipe.to(torch_device)
        generator = torch.manual_seed(0)
        out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe_2 = StableDiffusionPipeline.from_pretrained(tmpdirname, safety_checker=None)
            pipe_2 = pipe_2.to(torch_device)

            generator = torch.manual_seed(0)
            out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

        assert np.max(np.abs(out - out_2)) < 1e-3

    def test_load_no_safety_checker_default_locally(self):
        prompt = "hello"
        pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
        pipe = pipe.to(torch_device)

        generator = torch.manual_seed(0)
        out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe_2 = StableDiffusionPipeline.from_pretrained(tmpdirname)
            pipe_2 = pipe_2.to(torch_device)

            generator = torch.manual_seed(0)
            out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="numpy").images

        assert np.max(np.abs(out - out_2)) < 1e-3

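    # Simulate the Hub being unreachable (every request errors with a 500) and
    # verify that `local_files_only=True` still loads identical weights from the
    # local cache.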
    def test_cached_files_are_used_when_no_internet(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        orig_pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        orig_comps = {k: v for k, v in orig_pipe.components.items() if hasattr(v, "parameters")}

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.request", return_value=response_mock):
            # Download this model to make sure it's in the cache.
            pipe = StableDiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None, local_files_only=True
            )
            comps = {k: v for k, v in pipe.components.items() if hasattr(v, "parameters")}

        for m1, m2 in zip(orig_comps.values(), comps.values()):
            for p1, p2 in zip(m1.parameters(), m2.parameters()):
                if p1.data.ne(p2.data).sum() > 0:
                    assert False, "Parameters not the same!"

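    # Variant checkpoints follow the naming scheme `<weights name>.<variant>.<ext>`,
    # e.g. `diffusion_pytorch_model.fp16.safetensors`. The variant tests below flip
    # `_safetensors_available` to exercise both the `.safetensors` and `.bin` paths.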
    def test_download_from_variant_folder(self):
        for safe_avail in [False, True]:
            import diffusers

            diffusers.utils.import_utils._safetensors_available = safe_avail

            other_format = ".bin" if safe_avail else ".safetensors"
            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = StableDiffusionPipeline.download(
                    "hf-internal-testing/stable-diffusion-all-variants", cache_dir=tmpdirname
                )
                all_root_files = [t[-1] for t in os.walk(tmpdirname)]
                files = [item for sublist in all_root_files for item in sublist]

                # None of the downloaded files should be a variant file even if we have some here:
                # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet
                assert len(files) == 15, f"We should only download 15 files, not {len(files)}"
                assert not any(f.endswith(other_format) for f in files)
                # no variants
                assert not any(len(f.split(".")) == 3 for f in files)

        diffusers.utils.import_utils._safetensors_available = True

    def test_download_variant_all(self):
        for safe_avail in [False, True]:
            import diffusers

            diffusers.utils.import_utils._safetensors_available = safe_avail

            other_format = ".bin" if safe_avail else ".safetensors"
            this_format = ".safetensors" if safe_avail else ".bin"
            variant = "fp16"

            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = StableDiffusionPipeline.download(
                    "hf-internal-testing/stable-diffusion-all-variants", cache_dir=tmpdirname, variant=variant
                )
                all_root_files = [t[-1] for t in os.walk(tmpdirname)]
                files = [item for sublist in all_root_files for item in sublist]

                # None of the downloaded files should be a non-variant file even if we have some here:
                # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet
                assert len(files) == 15, f"We should only download 15 files, not {len(files)}"
                # unet, vae, text_encoder, safety_checker
                assert len([f for f in files if f.endswith(f"{variant}{this_format}")]) == 4
                # all checkpoints should have variant ending
                assert not any(f.endswith(this_format) and not f.endswith(f"{variant}{this_format}") for f in files)
                assert not any(f.endswith(other_format) for f in files)

        diffusers.utils.import_utils._safetensors_available = True

    def test_download_variant_partly(self):
        for safe_avail in [False, True]:
            import diffusers

            diffusers.utils.import_utils._safetensors_available = safe_avail

            other_format = ".bin" if safe_avail else ".safetensors"
            this_format = ".safetensors" if safe_avail else ".bin"
            variant = "no_ema"

            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = StableDiffusionPipeline.download(
                    "hf-internal-testing/stable-diffusion-all-variants", cache_dir=tmpdirname, variant=variant
                )
                all_root_files = [t[-1] for t in os.walk(tmpdirname)]
                files = [item for sublist in all_root_files for item in sublist]

                unet_files = os.listdir(os.path.join(tmpdirname, "unet"))

                # Some of the downloaded files should be a non-variant file, check:
                # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet
                assert len(files) == 15, f"We should only download 15 files, not {len(files)}"
                # only unet has "no_ema" variant
                assert f"diffusion_pytorch_model.{variant}{this_format}" in unet_files
                assert len([f for f in files if f.endswith(f"{variant}{this_format}")]) == 1
                # vae, safety_checker and text_encoder should have no variant
                assert sum(f.endswith(this_format) and not f.endswith(f"{variant}{this_format}") for f in files) == 3
                assert not any(f.endswith(other_format) for f in files)

        diffusers.utils.import_utils._safetensors_available = True

    def test_download_broken_variant(self):
        for safe_avail in [False, True]:
            import diffusers

            diffusers.utils.import_utils._safetensors_available = safe_avail
            # text encoder is missing no variant and "no_ema" variant weights, so the following can't work
            for variant in [None, "no_ema"]:
                with self.assertRaises(OSError) as error_context:
                    with tempfile.TemporaryDirectory() as tmpdirname:
                        tmpdirname = StableDiffusionPipeline.from_pretrained(
                            "hf-internal-testing/stable-diffusion-broken-variants",
                            cache_dir=tmpdirname,
                            variant=variant,
                        )

                assert "Error no file name" in str(error_context.exception)

            # text encoder has fp16 variants so we can load it
            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = StableDiffusionPipeline.download(
                    "hf-internal-testing/stable-diffusion-broken-variants", cache_dir=tmpdirname, variant="fp16"
                )

                all_root_files = [t[-1] for t in os.walk(tmpdirname)]
                files = [item for sublist in all_root_files for item in sublist]

                # None of the downloaded files should be a non-variant file even if we have some here:
                # https://huggingface.co/hf-internal-testing/stable-diffusion-broken-variants/tree/main/unet
                assert len(files) == 15, f"We should only download 15 files, not {len(files)}"

        diffusers.utils.import_utils._safetensors_available = True

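    # `load_textual_inversion` accepts two on-disk formats, both exercised below:
    # a diffusers-style dict mapping the placeholder token to its embedding tensor
    # (1-D for a single vector, 2-D for multi-vector embeddings), and an A1111-style
    # dict with `string_to_param`/`name` keys. Multi-vector embeddings expand into
    # `<token>`, `<token>_1`, `<token>_2`, ... entries in the tokenizer.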
    def test_text_inversion_download(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe = pipe.to(torch_device)

        num_tokens = len(pipe.tokenizer)

        # single token load local
        with tempfile.TemporaryDirectory() as tmpdirname:
            ten = {"<*>": torch.ones((32,))}
            torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin"))

            pipe.load_textual_inversion(tmpdirname)

            token = pipe.tokenizer.convert_tokens_to_ids("<*>")
            assert token == num_tokens, "Added token must be at spot `num_tokens`"
            assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 32
            assert pipe._maybe_convert_prompt("<*>", pipe.tokenizer) == "<*>"

            prompt = "hey <*>"
            out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
            assert out.shape == (1, 128, 128, 3)

        # single token load local with weight name
        with tempfile.TemporaryDirectory() as tmpdirname:
            ten = {"<**>": 2 * torch.ones((1, 32))}
            torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin"))

            pipe.load_textual_inversion(tmpdirname, weight_name="learned_embeds.bin")

            token = pipe.tokenizer.convert_tokens_to_ids("<**>")
            assert token == num_tokens + 1, "Added token must be at spot `num_tokens + 1`"
            assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64
            assert pipe._maybe_convert_prompt("<**>", pipe.tokenizer) == "<**>"

            prompt = "hey <**>"
            out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
            assert out.shape == (1, 128, 128, 3)

        # multi token load
        with tempfile.TemporaryDirectory() as tmpdirname:
            ten = {"<***>": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))])}
            torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin"))

            pipe.load_textual_inversion(tmpdirname)

            token = pipe.tokenizer.convert_tokens_to_ids("<***>")
            token_1 = pipe.tokenizer.convert_tokens_to_ids("<***>_1")
            token_2 = pipe.tokenizer.convert_tokens_to_ids("<***>_2")

            assert token == num_tokens + 2, "Added token must be at spot `num_tokens + 2`"
            assert token_1 == num_tokens + 3, "Added token must be at spot `num_tokens + 3`"
            assert token_2 == num_tokens + 4, "Added token must be at spot `num_tokens + 4`"
            assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96
            assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128
            assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160
            assert pipe._maybe_convert_prompt("<***>", pipe.tokenizer) == "<***><***>_1<***>_2"

            prompt = "hey <***>"
            out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
            assert out.shape == (1, 128, 128, 3)

        # multi token load a1111
        with tempfile.TemporaryDirectory() as tmpdirname:
            ten = {
                "string_to_param": {
                    "*": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))])
                },
                "name": "<****>",
            }
            torch.save(ten, os.path.join(tmpdirname, "a1111.bin"))

            pipe.load_textual_inversion(tmpdirname, weight_name="a1111.bin")

            token = pipe.tokenizer.convert_tokens_to_ids("<****>")
            token_1 = pipe.tokenizer.convert_tokens_to_ids("<****>_1")
            token_2 = pipe.tokenizer.convert_tokens_to_ids("<****>_2")

            assert token == num_tokens + 5, "Added token must be at spot `num_tokens + 5`"
            assert token_1 == num_tokens + 6, "Added token must be at spot `num_tokens + 6`"
            assert token_2 == num_tokens + 7, "Added token must be at spot `num_tokens + 7`"
            assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96
            assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128
            assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160
            assert pipe._maybe_convert_prompt("<****>", pipe.tokenizer) == "<****><****>_1<****>_2"

            prompt = "hey <****>"
            out = pipe(prompt, num_inference_steps=1, output_type="numpy").images
            assert out.shape == (1, 128, 128, 3)

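
# `custom_pipeline` accepts a Hub repo id (whose `pipeline.py` defines the class),
# the name of a community example in the diffusers GitHub repo, or a local
# directory/file path; each flavor is covered by one of the tests below.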
class CustomPipelineTests(unittest.TestCase):
    def test_load_custom_pipeline(self):
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
        )
        pipeline = pipeline.to(torch_device)

        # NOTE that `"CustomPipeline"` is not a class that is defined in this library, but solely on the Hub
        # under https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L24
        assert pipeline.__class__.__name__ == "CustomPipeline"

    def test_load_custom_github(self):
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", custom_revision="main"
        )

        # make sure that on "main" pipeline gives only ones because of: https://github.com/huggingface/diffusers/pull/1690
        with torch.no_grad():
            output = pipeline()

        assert output.numel() == output.sum()

        # hack since Python doesn't like overwriting modules: https://stackoverflow.com/questions/3105801/unload-a-module-in-python
        # Could in the future work with hashes instead.
        del sys.modules["diffusers_modules.git.one_step_unet"]

        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", custom_revision="0.10.2"
        )
        with torch.no_grad():
            output = pipeline()

        assert output.numel() != output.sum()

        assert pipeline.__class__.__name__ == "UnetSchedulerOneForwardPipeline"

    def test_run_custom_pipeline(self):
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
        )
        pipeline = pipeline.to(torch_device)
        images, output_str = pipeline(num_inference_steps=2, output_type="np")

        assert images[0].shape == (1, 32, 32, 3)

        # compare output to https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L102
        assert output_str == "This is a test"

    def test_local_custom_pipeline_repo(self):
        local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline")
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path
        )
        pipeline = pipeline.to(torch_device)
        images, output_str = pipeline(num_inference_steps=2, output_type="np")

        assert pipeline.__class__.__name__ == "CustomLocalPipeline"
        assert images[0].shape == (1, 32, 32, 3)
        # compare to https://github.com/huggingface/diffusers/blob/main/tests/fixtures/custom_pipeline/pipeline.py#L102
        assert output_str == "This is a local test"

    def test_local_custom_pipeline_file(self):
        local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline")
        local_custom_pipeline_path = os.path.join(local_custom_pipeline_path, "what_ever.py")
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path
        )
        pipeline = pipeline.to(torch_device)
        images, output_str = pipeline(num_inference_steps=2, output_type="np")

        assert pipeline.__class__.__name__ == "CustomLocalPipeline"
        assert images[0].shape == (1, 32, 32, 3)
        # compare to https://github.com/huggingface/diffusers/blob/main/tests/fixtures/custom_pipeline/pipeline.py#L102
        assert output_str == "This is a local test"

    @slow
    @require_torch_gpu
    def test_download_from_git(self):
        clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"

        feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id)
        clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)

        pipeline = DiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            custom_pipeline="clip_guided_stable_diffusion",
            clip_model=clip_model,
            feature_extractor=feature_extractor,
            torch_dtype=torch.float16,
        )
        pipeline.enable_attention_slicing()
        pipeline = pipeline.to(torch_device)

        # NOTE that `"CLIPGuidedStableDiffusion"` is not a class that is defined in the PyPI package of the library, but solely in the community examples folder on GitHub under:
        # https://github.com/huggingface/diffusers/blob/main/examples/community/clip_guided_stable_diffusion.py
        assert pipeline.__class__.__name__ == "CLIPGuidedStableDiffusion"

        image = pipeline("a prompt", num_inference_steps=2, output_type="np").images[0]
        assert image.shape == (512, 512, 3)


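# Fast tests build tiny, randomly initialized components so that they run in
# seconds on CPU instead of downloading full-size checkpoints.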
class PipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

        import diffusers

        diffusers.utils.import_utils._safetensors_available = True

    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def dummy_uncond_unet(self, sample_size=32):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=sample_size,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def dummy_cond_unet(self, sample_size=32):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=sample_size,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    # Tensor.to is not in-place, so the moved tensor must be kept
                    self.pixel_values = self.pixel_values.to(device)
                    return self

            return Out()

        return extract

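    # `sample_size` may be an int (square images) or a (height, width) tuple; the
    # cases below cover both forms for DDPM and DDIM.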
    @parameterized.expand(
        [
            [DDIMScheduler, DDIMPipeline, 32],
            [DDPMScheduler, DDPMPipeline, 32],
            [DDIMScheduler, DDIMPipeline, (32, 64)],
            [DDPMScheduler, DDPMPipeline, (64, 32)],
        ]
    )
    def test_uncond_unet_components(self, scheduler_fn=DDPMScheduler, pipeline_fn=DDPMPipeline, sample_size=32):
        unet = self.dummy_uncond_unet(sample_size)
        scheduler = scheduler_fn()
        pipeline = pipeline_fn(unet, scheduler).to(torch_device)

        generator = torch.manual_seed(0)
        out_image = pipeline(
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images
        sample_size = (sample_size, sample_size) if isinstance(sample_size, int) else sample_size
        assert out_image.shape == (1, *sample_size, 3)

    def test_stable_diffusion_components(self):
        """Test that components property works correctly"""
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image().cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB")
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((32, 32))

        # make sure here that pndm scheduler skips prk
        inpaint = StableDiffusionInpaintPipelineLegacy(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        ).to(torch_device)
        img2img = StableDiffusionImg2ImgPipeline(**inpaint.components).to(torch_device)
        text2img = StableDiffusionPipeline(**inpaint.components).to(torch_device)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.manual_seed(0)
        image_inpaint = inpaint(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            mask_image=mask_image,
        ).images
        image_img2img = img2img(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images
        image_text2img = text2img(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        assert image_inpaint.shape == (1, 32, 32, 3)
        assert image_img2img.shape == (1, 32, 32, 3)
        assert image_text2img.shape == (1, 64, 64, 3)

    @require_torch_gpu
    def test_pipe_false_offload_warn(self):
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        sd.enable_model_cpu_offload()

        logger = logging.get_logger("diffusers.pipelines.pipeline_utils")
        with CaptureLogger(logger) as cap_logger:
            sd.to("cuda")

        assert "It is strongly recommended against doing so" in str(cap_logger)

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

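    # Schedulers share a compatible config format, so a pipeline's scheduler can be
    # hot-swapped in place via `OtherScheduler.from_config(pipe.scheduler.config)`.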
    def test_set_scheduler(self):
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        sd.scheduler = DDIMScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, DDIMScheduler)
        sd.scheduler = DDPMScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, DDPMScheduler)
        sd.scheduler = PNDMScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, PNDMScheduler)
        sd.scheduler = LMSDiscreteScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, LMSDiscreteScheduler)
        sd.scheduler = EulerDiscreteScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, EulerDiscreteScheduler)
        sd.scheduler = EulerAncestralDiscreteScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, EulerAncestralDiscreteScheduler)
        sd.scheduler = DPMSolverMultistepScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, DPMSolverMultistepScheduler)

    def test_set_scheduler_consistency(self):
        unet = self.dummy_cond_unet()
        pndm = PNDMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler")
        ddim = DDIMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler")
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=pndm,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        pndm_config = sd.scheduler.config
        sd.scheduler = DDPMScheduler.from_config(pndm_config)
        sd.scheduler = PNDMScheduler.from_config(sd.scheduler.config)
        pndm_config_2 = sd.scheduler.config
        pndm_config_2 = {k: v for k, v in pndm_config_2.items() if k in pndm_config}

        assert dict(pndm_config) == dict(pndm_config_2)

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=ddim,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        ddim_config = sd.scheduler.config
        sd.scheduler = LMSDiscreteScheduler.from_config(ddim_config)
        sd.scheduler = DDIMScheduler.from_config(sd.scheduler.config)
        ddim_config_2 = sd.scheduler.config
        ddim_config_2 = {k: v for k, v in ddim_config_2.items() if k in ddim_config}

        assert dict(ddim_config) == dict(ddim_config_2)

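    # `safe_serialization=True` makes `save_pretrained` write every torch component
    # as a `.safetensors` file instead of a pickled `.bin` file.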
    def test_save_safe_serialization(self):
        pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipeline.save_pretrained(tmpdirname, safe_serialization=True)

            # Validate that the VAE safetensors file exists and is of the correct format
            vae_path = os.path.join(tmpdirname, "vae", "diffusion_pytorch_model.safetensors")
            assert os.path.exists(vae_path), f"Could not find {vae_path}"
            _ = safetensors.torch.load_file(vae_path)

            # Validate that the UNet safetensors file exists and is of the correct format
            unet_path = os.path.join(tmpdirname, "unet", "diffusion_pytorch_model.safetensors")
            assert os.path.exists(unet_path), f"Could not find {unet_path}"
            _ = safetensors.torch.load_file(unet_path)

            # Validate that the text encoder safetensors file exists and is of the correct format
            text_encoder_path = os.path.join(tmpdirname, "text_encoder", "model.safetensors")
            assert os.path.exists(text_encoder_path), f"Could not find {text_encoder_path}"
            _ = safetensors.torch.load_file(text_encoder_path)

            pipeline = StableDiffusionPipeline.from_pretrained(tmpdirname)
            assert pipeline.unet is not None
            assert pipeline.vae is not None
            assert pipeline.text_encoder is not None
            assert pipeline.scheduler is not None
            assert pipeline.feature_extractor is not None

    def test_no_pytorch_download_when_doing_safetensors(self):
        # by default we don't download the PyTorch weights when safetensors are available
        with tempfile.TemporaryDirectory() as tmpdirname:
            _ = StableDiffusionPipeline.from_pretrained(
                "hf-internal-testing/diffusers-stable-diffusion-tiny-all", cache_dir=tmpdirname
            )

            path = os.path.join(
                tmpdirname,
                "models--hf-internal-testing--diffusers-stable-diffusion-tiny-all",
                "snapshots",
                "07838d72e12f9bcec1375b0482b80c1d399be843",
                "unet",
            )
            # safetensors exists
            assert os.path.exists(os.path.join(path, "diffusion_pytorch_model.safetensors"))
            # pytorch does not
            assert not os.path.exists(os.path.join(path, "diffusion_pytorch_model.bin"))

    def test_no_safetensors_download_when_doing_pytorch(self):
        # mock diffusers safetensors not available
        import diffusers

        diffusers.utils.import_utils._safetensors_available = False

        with tempfile.TemporaryDirectory() as tmpdirname:
            _ = StableDiffusionPipeline.from_pretrained(
                "hf-internal-testing/diffusers-stable-diffusion-tiny-all", cache_dir=tmpdirname
            )

            path = os.path.join(
                tmpdirname,
                "models--hf-internal-testing--diffusers-stable-diffusion-tiny-all",
                "snapshots",
                "07838d72e12f9bcec1375b0482b80c1d399be843",
                "unet",
            )
            # the safetensors file does not exist
            assert not os.path.exists(os.path.join(path, "diffusion_pytorch_model.safetensors"))
            # pytorch does
            assert os.path.exists(os.path.join(path, "diffusion_pytorch_model.bin"))

        diffusers.utils.import_utils._safetensors_available = True

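    # `safety_checker` and `feature_extractor` are optional components: each may be
    # passed as `None`, removed from `model_index.json` entirely, or overridden at
    # load time, and `save_pretrained`/`from_pretrained` must round-trip every case.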
    def test_optional_components(self):
        unet = self.dummy_cond_unet()
        pndm = PNDMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler")
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        orig_sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=pndm,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=unet,
            feature_extractor=self.dummy_extractor,
        )
        sd = orig_sd

        assert sd.config.requires_safety_checker is True

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)

            # Test that passing None works
            sd = StableDiffusionPipeline.from_pretrained(
                tmpdirname, feature_extractor=None, safety_checker=None, requires_safety_checker=False
            )

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor == (None, None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)

            # Test that loading previous None works
            sd = StableDiffusionPipeline.from_pretrained(tmpdirname)

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor == (None, None)

            orig_sd.save_pretrained(tmpdirname)

            # Test that loading works even after the safety_checker folder has been deleted
            shutil.rmtree(os.path.join(tmpdirname, "safety_checker"))
            with open(os.path.join(tmpdirname, sd.config_name)) as f:
                config = json.load(f)
                config["safety_checker"] = [None, None]
            with open(os.path.join(tmpdirname, sd.config_name), "w") as f:
                json.dump(config, f)

            sd = StableDiffusionPipeline.from_pretrained(tmpdirname, requires_safety_checker=False)
            sd.save_pretrained(tmpdirname)
            sd = StableDiffusionPipeline.from_pretrained(tmpdirname)

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor == (None, None)

            # Test that loading works when safety_checker/feature_extractor are deleted from the model index
            with open(os.path.join(tmpdirname, sd.config_name)) as f:
                config = json.load(f)
                del config["safety_checker"]
                del config["feature_extractor"]
            with open(os.path.join(tmpdirname, sd.config_name), "w") as f:
                json.dump(config, f)

            sd = StableDiffusionPipeline.from_pretrained(tmpdirname)

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor == (None, None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)

            # Test that partially loading works
            sd = StableDiffusionPipeline.from_pretrained(tmpdirname, feature_extractor=self.dummy_extractor)

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor != (None, None)

            # Test that partially loading works
            sd = StableDiffusionPipeline.from_pretrained(
                tmpdirname,
                feature_extractor=self.dummy_extractor,
                safety_checker=unet,
                requires_safety_checker=[True, True],
            )

            assert sd.config.requires_safety_checker == [True, True]
            assert sd.config.safety_checker != (None, None)
            assert sd.config.feature_extractor != (None, None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)
            sd = StableDiffusionPipeline.from_pretrained(tmpdirname, feature_extractor=self.dummy_extractor)

            assert sd.config.requires_safety_checker == [True, True]
            assert sd.config.safety_checker != (None, None)
            assert sd.config.feature_extractor != (None, None)


@slow
@require_torch_gpu
class PipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_smart_download(self):
        model_id = "hf-internal-testing/unet-pipeline-dummy"
        with tempfile.TemporaryDirectory() as tmpdirname:
            _ = DiffusionPipeline.from_pretrained(model_id, cache_dir=tmpdirname, force_download=True)
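
            # The Hub cache lays a repo out as `models--<org>--<name>/snapshots/<commit hash>`;
            # resolve the single snapshot directory so its contents can be inspected.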
            local_repo_name = "--".join(["models"] + model_id.split("/"))
            snapshot_dir = os.path.join(tmpdirname, local_repo_name, "snapshots")
            snapshot_dir = os.path.join(snapshot_dir, os.listdir(snapshot_dir)[0])

            # inspect all downloaded files to make sure that everything is included
            assert os.path.isfile(os.path.join(snapshot_dir, DiffusionPipeline.config_name))
            assert os.path.isfile(os.path.join(snapshot_dir, CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, SCHEDULER_CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, WEIGHTS_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, "scheduler", SCHEDULER_CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, "unet", WEIGHTS_NAME))
            # let's make sure the super large numpy file:
            # https://huggingface.co/hf-internal-testing/unet-pipeline-dummy/blob/main/big_array.npy
            # is not downloaded, while all the expected files are
            assert not os.path.isfile(os.path.join(snapshot_dir, "big_array.npy"))

    def test_warning_unused_kwargs(self):
        model_id = "hf-internal-testing/unet-pipeline-dummy"
        logger = logging.get_logger("diffusers.pipelines")

        with tempfile.TemporaryDirectory() as tmpdirname:
            with CaptureLogger(logger) as cap_logger:
                DiffusionPipeline.from_pretrained(
                    model_id,
                    not_used=True,
                    cache_dir=tmpdirname,
                    force_download=True,
                )

        assert (
            cap_logger.out.strip().split("\n")[-1]
            == "Keyword arguments {'not_used': True} are not expected by DDPMPipeline and will be ignored."
        )

    def test_from_save_pretrained(self):
        # 1. Load models
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDPMScheduler(num_train_timesteps=10)

        ddpm = DDPMPipeline(model, scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            ddpm.save_pretrained(tmpdirname)
            new_ddpm = DDPMPipeline.from_pretrained(tmpdirname)
            new_ddpm.to(torch_device)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images

        generator = torch.Generator(device=torch_device).manual_seed(0)
        new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"

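    # Same round-trip as above, but with the UNet wrapped by `torch.compile`; the
    # compiled pipeline must still save and reload to matching outputs.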
    @require_torch_2
    def test_from_save_pretrained_dynamo(self):
        # 1. Load models
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        model = torch.compile(model)
        scheduler = DDPMScheduler(num_train_timesteps=10)

        ddpm = DDPMPipeline(model, scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            ddpm.save_pretrained(tmpdirname)
            new_ddpm = DDPMPipeline.from_pretrained(tmpdirname)
            new_ddpm.to(torch_device)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images

        generator = torch.Generator(device=torch_device).manual_seed(0)
        new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"

    def test_from_pretrained_hub(self):
        model_path = "google/ddpm-cifar10-32"

        scheduler = DDPMScheduler(num_train_timesteps=10)

        ddpm = DDPMPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm = ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm_from_hub = ddpm_from_hub.to(torch_device)
        ddpm_from_hub.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = ddpm(generator=generator, num_inference_steps=5, output_type="numpy").images

        generator = torch.Generator(device=torch_device).manual_seed(0)
        new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"

    def test_from_pretrained_hub_pass_model(self):
        model_path = "google/ddpm-cifar10-32"

        scheduler = DDPMScheduler(num_train_timesteps=10)

        # pass unet into DiffusionPipeline
        unet = UNet2DModel.from_pretrained(model_path)
        ddpm_from_hub_custom_model = DiffusionPipeline.from_pretrained(model_path, unet=unet, scheduler=scheduler)
        ddpm_from_hub_custom_model = ddpm_from_hub_custom_model.to(torch_device)
        ddpm_from_hub_custom_model.set_progress_bar_config(disable=None)

        ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm_from_hub = ddpm_from_hub.to(torch_device)
        ddpm_from_hub.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = ddpm_from_hub_custom_model(generator=generator, num_inference_steps=5, output_type="numpy").images

        generator = torch.Generator(device=torch_device).manual_seed(0)
        new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="numpy").images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't give the same forward pass"

    def test_output_format(self):
        model_path = "google/ddpm-cifar10-32"

        scheduler = DDIMScheduler.from_pretrained(model_path)
        pipe = DDIMPipeline.from_pretrained(model_path, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        images = pipe(output_type="numpy").images
        assert images.shape == (1, 32, 32, 3)
        assert isinstance(images, np.ndarray)

        images = pipe(output_type="pil", num_inference_steps=4).images
        assert isinstance(images, list)
        assert len(images) == 1
        assert isinstance(images[0], PIL.Image.Image)

        # use PIL by default
        images = pipe(num_inference_steps=4).images
        assert isinstance(images, list)
        assert isinstance(images[0], PIL.Image.Image)

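    # Round-trip the weights PyTorch -> Flax -> PyTorch (`from_pt=True` /
    # `from_flax=True`) and check that both PyTorch pipelines produce the same images.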
    def test_from_flax_from_pt(self):
        pipe_pt = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe_pt.to(torch_device)

        if not is_flax_available():
            raise ImportError("Make sure flax is installed.")

        from diffusers import FlaxStableDiffusionPipeline

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe_pt.save_pretrained(tmpdirname)

            pipe_flax, params = FlaxStableDiffusionPipeline.from_pretrained(
                tmpdirname, safety_checker=None, from_pt=True
            )

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe_flax.save_pretrained(tmpdirname, params=params)
            pipe_pt_2 = StableDiffusionPipeline.from_pretrained(tmpdirname, safety_checker=None, from_flax=True)
            pipe_pt_2.to(torch_device)

        prompt = "Hello"

        generator = torch.manual_seed(0)
        image_0 = pipe_pt(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images[0]

        generator = torch.manual_seed(0)
        image_1 = pipe_pt_2(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images[0]

        assert np.abs(image_0 - image_1).sum() < 1e-5, "Models don't give the same forward pass"

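    # `compel` converts weighting syntax inside prompts into `prompt_embeds`:
    # trailing `+`/`-` characters up- or down-weight the preceding token.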
    @require_compel
    def test_weighted_prompts_compel(self):
        from compel import Compel

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.enable_attention_slicing()

        compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)

        prompt = "a red cat playing with a ball{}"

        prompts = [prompt.format(s) for s in ["", "++", "--"]]

        prompt_embeds = compel(prompts)

        generator = [torch.Generator(device="cpu").manual_seed(33) for _ in range(prompt_embeds.shape[0])]

        images = pipe(
            prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20, output_type="numpy"
        ).images

        for i, image in enumerate(images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/compel/forest_{i}.npy"
            )

            assert np.abs(image - expected_image).max() < 1e-2


@nightly
@require_torch_gpu
class PipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

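    # With `eta=1.0`, all 1000 steps, and clipped model outputs, DDIM sampling
    # reduces to the DDPM ancestral sampler, so both pipelines should generate
    # (near-)identical batches from the same seed.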
    def test_ddpm_ddim_equality_batched(self):
        seed = 0
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        ddpm_scheduler = DDPMScheduler()
        ddim_scheduler = DDIMScheduler()

        ddpm = DDPMPipeline(unet=unet, scheduler=ddpm_scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        ddim = DDIMPipeline(unet=unet, scheduler=ddim_scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(seed)
        ddpm_images = ddpm(batch_size=2, generator=generator, output_type="numpy").images

        generator = torch.Generator(device=torch_device).manual_seed(seed)
        ddim_images = ddim(
            batch_size=2,
            generator=generator,
            num_inference_steps=1000,
            eta=1.0,
            output_type="numpy",
            use_clipped_model_output=True,  # Need this to make DDIM match DDPM
        ).images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(ddpm_images - ddim_images).max() < 1e-1