# coding=utf-8
# Copyright 2024 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import json
import os
import random
import shutil
import sys
import tempfile
import traceback
import unittest
import unittest.mock as mock

import numpy as np
import PIL.Image
import requests_mock
import safetensors.torch
import torch
import torch.nn as nn
from huggingface_hub import snapshot_download
from parameterized import parameterized
from PIL import Image
from requests.exceptions import HTTPError
from transformers import CLIPImageProcessor, CLIPModel, CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    ConfigMixin,
    DDIMPipeline,
    DDIMScheduler,
    DDPMPipeline,
    DDPMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    ModelMixin,
    PNDMScheduler,
    StableDiffusionImg2ImgPipeline,
    StableDiffusionInpaintPipelineLegacy,
    StableDiffusionPipeline,
    UNet2DConditionModel,
    UNet2DModel,
    UniPCMultistepScheduler,
    logging,
)
from diffusers.pipelines.pipeline_utils import _get_pipeline_class
from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
from diffusers.utils import (
    CONFIG_NAME,
    WEIGHTS_NAME,
)
from diffusers.utils.testing_utils import (
    CaptureLogger,
    backend_empty_cache,
    enable_full_determinism,
    floats_tensor,
    get_python_version,
    get_tests_dir,
    is_torch_compile,
    load_numpy,
    nightly,
    require_compel,
    require_flax,
    require_hf_hub_version_greater,
    require_onnxruntime,
    require_torch_2,
    require_torch_accelerator,
    require_transformers_version_greater,
    run_test_in_subprocess,
    slow,
    torch_device,
)
from diffusers.utils.torch_utils import is_compiled_module


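# Force deterministic torch algorithms so pipeline outputs can be compared
# exactly across runs and save/load round-trips.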
enable_full_determinism()


# Will be run via run_test_in_subprocess
def _test_from_save_pretrained_dynamo(in_queue, out_queue, timeout):
    error = None
    try:
        # 1. Load models
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        model = torch.compile(model)
        scheduler = DDPMScheduler(num_train_timesteps=10)

        ddpm = DDPMPipeline(model, scheduler)

        # earlier diffusers versions stripped the torch.compile wrapper
        # off compiled modules
        assert is_compiled_module(ddpm.unet)

        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            ddpm.save_pretrained(tmpdirname)
            new_ddpm = DDPMPipeline.from_pretrained(tmpdirname)
            new_ddpm.to(torch_device)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = ddpm(generator=generator, num_inference_steps=5, output_type="np").images

        generator = torch.Generator(device=torch_device).manual_seed(0)
        new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="np").images

        assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"
    except Exception:
        error = f"{traceback.format_exc()}"

    results = {"error": error}
    out_queue.put(results, timeout=timeout)
    out_queue.join()


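# A minimal sketch of how the helper above is typically driven (an assumption
# modeled on how `run_test_in_subprocess` is used elsewhere in the test suite;
# the decorated test itself is outside this excerpt):
#
#     @is_torch_compile
#     @require_torch_2
#     def test_from_save_pretrained_dynamo(self):
#         run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=None)


# Minimal custom model and pipeline used below to exercise save/load round-trips
# of user-defined components (see `test_custom_model_and_pipeline`).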
class CustomEncoder(ModelMixin, ConfigMixin):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(3, 3)


class CustomPipeline(DiffusionPipeline):
    def __init__(self, encoder: CustomEncoder, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(encoder=encoder, scheduler=scheduler)


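# Tests for `DiffusionPipeline.download`/`from_pretrained`: HTTP request counts,
# cache reuse without internet, variant and safetensors file selection, and
# textual-inversion embedding loading.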
class DownloadTests(unittest.TestCase):
    @unittest.skip("Flaky behaviour on CI. Re-enable after migrating to new runners")
    def test_one_request_upon_cached(self):
        # TODO: For some reason this test fails on MPS where no HEAD call is made.
        if torch_device == "mps":
            return

        with tempfile.TemporaryDirectory() as tmpdirname:
            with requests_mock.mock(real_http=True) as m:
                DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-pipe", cache_dir=tmpdirname)

            download_requests = [r.method for r in m.request_history]
            assert download_requests.count("HEAD") == 15, "15 calls to files"
            assert download_requests.count("GET") == 17, "15 calls to files + model_info + model_index.json"
            assert (
                len(download_requests) == 32
            ), "2 calls per file (15 files) + send_telemetry, model_info and model_index.json"

            with requests_mock.mock(real_http=True) as m:
                DiffusionPipeline.download(
                    "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
                )

            cache_requests = [r.method for r in m.request_history]
            assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD"
            assert cache_requests.count("GET") == 1, "model info is only GET"
            assert (
                len(cache_requests) == 2
            ), "We should call only `model_info` to check for _commit hash and `send_telemetry`"

    def test_less_downloads_passed_object(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            cached_folder = DiffusionPipeline.download(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            # make sure safety checker is not downloaded
            assert "safety_checker" not in os.listdir(cached_folder)

            # make sure rest is downloaded
            assert "unet" in os.listdir(cached_folder)
            assert "tokenizer" in os.listdir(cached_folder)
            assert "vae" in os.listdir(cached_folder)
            assert "model_index.json" in os.listdir(cached_folder)
            assert "scheduler" in os.listdir(cached_folder)
            assert "feature_extractor" in os.listdir(cached_folder)

    @unittest.skip("Flaky behaviour on CI. Re-enable after migrating to new runners")
    def test_less_downloads_passed_object_calls(self):
        # TODO: For some reason this test fails on MPS where no HEAD call is made.
        if torch_device == "mps":
            return

        with tempfile.TemporaryDirectory() as tmpdirname:
            with requests_mock.mock(real_http=True) as m:
                DiffusionPipeline.download(
                    "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
                )

            download_requests = [r.method for r in m.request_history]
            # 15 - 2 because no call to config or model file for `safety_checker`
            assert download_requests.count("HEAD") == 13, "13 calls to files"
            # 17 - 2 because no call to config or model file for `safety_checker`
            assert download_requests.count("GET") == 15, "13 calls to files + model_info + model_index.json"
            assert (
                len(download_requests) == 28
            ), "2 calls per file (13 files) + send_telemetry, model_info and model_index.json"

            with requests_mock.mock(real_http=True) as m:
                DiffusionPipeline.download(
                    "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
                )

            cache_requests = [r.method for r in m.request_history]
            assert cache_requests.count("HEAD") == 1, "model_index.json is only HEAD"
            assert cache_requests.count("GET") == 1, "model info is only GET"
            assert (
                len(cache_requests) == 2
            ), "We should call only `model_info` to check for _commit hash and `send_telemetry`"

    def test_download_only_pytorch(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            tmpdirname = DiffusionPipeline.download(
                "hf-internal-testing/tiny-stable-diffusion-pipe", safety_checker=None, cache_dir=tmpdirname
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a flax file even if we have some here:
            # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_flax_model.msgpack
            assert not any(f.endswith(".msgpack") for f in files)
            # this tiny model must never be converted to safetensors for this test to keep passing
            assert not any(f.endswith(".safetensors") for f in files)

    def test_force_safetensors_error(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            with self.assertRaises(EnvironmentError):
                tmpdirname = DiffusionPipeline.download(
                    "hf-internal-testing/tiny-stable-diffusion-pipe-no-safetensors",
                    safety_checker=None,
                    cache_dir=tmpdirname,
                    use_safetensors=True,
                )

    def test_download_safetensors(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            # pipeline has Flax weights
            tmpdirname = DiffusionPipeline.download(
                "hf-internal-testing/tiny-stable-diffusion-pipe-safetensors",
                safety_checker=None,
                cache_dir=tmpdirname,
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the downloaded files should be a PyTorch `.bin` file even though the repo
            # also contains them: https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-safetensors/tree/main
            assert not any(f.endswith(".bin") for f in files)

    def test_download_safetensors_index(self):
        for variant in ["fp16", None]:
            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = DiffusionPipeline.download(
                    "hf-internal-testing/tiny-stable-diffusion-pipe-indexes",
                    cache_dir=tmpdirname,
                    use_safetensors=True,
                    variant=variant,
                )

                all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
                files = [item for sublist in all_root_files for item in sublist]

                # Only files of the requested variant should be downloaded, even though
                # other variants exist here:
                # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-indexes/tree/main/text_encoder
                if variant is None:
                    assert not any("fp16" in f for f in files)
                else:
                    model_files = [f for f in files if "safetensors" in f]
                    assert all("fp16" in f for f in model_files)

                assert len([f for f in files if ".safetensors" in f]) == 8
                assert not any(".bin" in f for f in files)

    def test_download_bin_index(self):
        for variant in ["fp16", None]:
            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = DiffusionPipeline.download(
                    "hf-internal-testing/tiny-stable-diffusion-pipe-indexes",
                    cache_dir=tmpdirname,
                    use_safetensors=False,
                    variant=variant,
                )

                all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
                files = [item for sublist in all_root_files for item in sublist]

                # Only files of the requested variant should be downloaded, even though
                # other variants exist here:
                # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-indexes/tree/main/text_encoder
                if variant is None:
                    assert not any("fp16" in f for f in files)
                else:
                    model_files = [f for f in files if "bin" in f]
                    assert all("fp16" in f for f in model_files)

                assert len([f for f in files if ".bin" in f]) == 8
                assert not any(".safetensors" in f for f in files)

    def test_download_no_openvino_by_default(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            tmpdirname = DiffusionPipeline.download(
                "hf-internal-testing/tiny-stable-diffusion-open-vino",
                cache_dir=tmpdirname,
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
            files = [item for sublist in all_root_files for item in sublist]

            # make sure that by default no openvino weights are downloaded
            assert all((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files)
            assert not any("openvino_" in f for f in files)

    def test_download_no_onnx_by_default(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            tmpdirname = DiffusionPipeline.download(
                "hf-internal-testing/tiny-stable-diffusion-xl-pipe",
                cache_dir=tmpdirname,
                use_safetensors=False,
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
            files = [item for sublist in all_root_files for item in sublist]

            # make sure that by default no onnx weights are downloaded for non-ONNX pipelines
            assert all((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files)
            assert not any((f.endswith(".onnx") or f.endswith(".pb")) for f in files)

    @require_onnxruntime
    def test_download_onnx_by_default_for_onnx_pipelines(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            tmpdirname = DiffusionPipeline.download(
                "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline",
                cache_dir=tmpdirname,
            )

            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
            files = [item for sublist in all_root_files for item in sublist]

            # make sure that by default onnx weights are downloaded for ONNX pipelines
            assert any((f.endswith(".json") or f.endswith(".bin") or f.endswith(".txt")) for f in files)
            assert any((f.endswith(".onnx")) for f in files)
            assert any((f.endswith(".pb")) for f in files)

    def test_download_no_safety_checker(self):
        prompt = "hello"
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe = pipe.to(torch_device)
        generator = torch.manual_seed(0)
        out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images

        pipe_2 = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
        pipe_2 = pipe_2.to(torch_device)
        generator = torch.manual_seed(0)
        out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images

        assert np.max(np.abs(out - out_2)) < 1e-3

    def test_load_no_safety_checker_explicit_locally(self):
        prompt = "hello"
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe = pipe.to(torch_device)
        generator = torch.manual_seed(0)
        out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe_2 = StableDiffusionPipeline.from_pretrained(tmpdirname, safety_checker=None)
            pipe_2 = pipe_2.to(torch_device)

            generator = torch.manual_seed(0)

            out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images

        assert np.max(np.abs(out - out_2)) < 1e-3

    def test_load_no_safety_checker_default_locally(self):
        prompt = "hello"
        pipe = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
        pipe = pipe.to(torch_device)

        generator = torch.manual_seed(0)
        out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe_2 = StableDiffusionPipeline.from_pretrained(tmpdirname)
            pipe_2 = pipe_2.to(torch_device)

            generator = torch.manual_seed(0)

            out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images

        assert np.max(np.abs(out - out_2)) < 1e-3

    def test_cached_files_are_used_when_no_internet(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        orig_pipe = DiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        orig_comps = {k: v for k, v in orig_pipe.components.items() if hasattr(v, "parameters")}

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.request", return_value=response_mock):
            # Load the pipeline again; everything must come from the local cache.
            pipe = DiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
            )
            comps = {k: v for k, v in pipe.components.items() if hasattr(v, "parameters")}

        for m1, m2 in zip(orig_comps.values(), comps.values()):
            for p1, p2 in zip(m1.parameters(), m2.parameters()):
                assert p1.data.ne(p2.data).sum() == 0, "Parameters not the same!"

    def test_local_files_only_are_used_when_no_internet(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # first check that with local files only the pipeline can only be used if cached
        with self.assertRaises(FileNotFoundError):
            with tempfile.TemporaryDirectory() as tmpdirname:
                orig_pipe = DiffusionPipeline.from_pretrained(
                    "hf-internal-testing/tiny-stable-diffusion-torch", local_files_only=True, cache_dir=tmpdirname
                )

        # now download
        orig_pipe = DiffusionPipeline.download("hf-internal-testing/tiny-stable-diffusion-torch")

        # make sure it can be loaded with local_files_only
        orig_pipe = DiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", local_files_only=True
        )
        orig_comps = {k: v for k, v in orig_pipe.components.items() if hasattr(v, "parameters")}

        # Under the mock environment we get a 500 error when trying to connect to the internet.
        # Make sure loading still works because all files are available locally.
        with mock.patch("requests.request", return_value=response_mock):
            # Load the pipeline again; everything must come from the local cache.
            pipe = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
            comps = {k: v for k, v in pipe.components.items() if hasattr(v, "parameters")}

        for m1, m2 in zip(orig_comps.values(), comps.values()):
            for p1, p2 in zip(m1.parameters(), m2.parameters()):
                assert p1.data.ne(p2.data).sum() == 0, "Parameters not the same!"

    def test_download_from_variant_folder(self):
        for use_safetensors in [False, True]:
            other_format = ".bin" if use_safetensors else ".safetensors"
            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = StableDiffusionPipeline.download(
                    "hf-internal-testing/stable-diffusion-all-variants",
                    cache_dir=tmpdirname,
                    use_safetensors=use_safetensors,
                )
                all_root_files = [t[-1] for t in os.walk(tmpdirname)]
                files = [item for sublist in all_root_files for item in sublist]

                # None of the downloaded files should be a variant file even if we have some here:
                # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet
                assert len(files) == 15, f"We should only download 15 files, not {len(files)}"
                assert not any(f.endswith(other_format) for f in files)
                # no variants
                assert not any(len(f.split(".")) == 3 for f in files)

    def test_download_variant_all(self):
        for use_safetensors in [False, True]:
            other_format = ".bin" if use_safetensors else ".safetensors"
            this_format = ".safetensors" if use_safetensors else ".bin"
            variant = "fp16"

            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = StableDiffusionPipeline.download(
                    "hf-internal-testing/stable-diffusion-all-variants",
                    cache_dir=tmpdirname,
                    variant=variant,
                    use_safetensors=use_safetensors,
                )
                all_root_files = [t[-1] for t in os.walk(tmpdirname)]
                files = [item for sublist in all_root_files for item in sublist]

                # None of the downloaded files should be a non-variant file even if we have some here:
                # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet
                assert len(files) == 15, f"We should only download 15 files, not {len(files)}"
                # unet, vae, text_encoder, safety_checker
                assert len([f for f in files if f.endswith(f"{variant}{this_format}")]) == 4
                # all checkpoints should have variant ending
                assert not any(f.endswith(this_format) and not f.endswith(f"{variant}{this_format}") for f in files)
                assert not any(f.endswith(other_format) for f in files)

    def test_download_variant_partly(self):
        for use_safetensors in [False, True]:
            other_format = ".bin" if use_safetensors else ".safetensors"
            this_format = ".safetensors" if use_safetensors else ".bin"
            variant = "no_ema"

            with tempfile.TemporaryDirectory() as tmpdirname:
                tmpdirname = StableDiffusionPipeline.download(
                    "hf-internal-testing/stable-diffusion-all-variants",
                    cache_dir=tmpdirname,
                    variant=variant,
                    use_safetensors=use_safetensors,
                )
                all_root_files = [t[-1] for t in os.walk(tmpdirname)]
                files = [item for sublist in all_root_files for item in sublist]

                unet_files = os.listdir(os.path.join(tmpdirname, "unet"))

                # Some of the downloaded files should be a non-variant file, check:
                # https://huggingface.co/hf-internal-testing/stable-diffusion-all-variants/tree/main/unet
                assert len(files) == 15, f"We should only download 15 files, not {len(files)}"
                # only unet has "no_ema" variant
                assert f"diffusion_pytorch_model.{variant}{this_format}" in unet_files
                assert len([f for f in files if f.endswith(f"{variant}{this_format}")]) == 1
                # vae, safety_checker and text_encoder should have no variant
                assert sum(f.endswith(this_format) and not f.endswith(f"{variant}{this_format}") for f in files) == 3
                assert not any(f.endswith(other_format) for f in files)

    def test_download_variants_with_sharded_checkpoints(self):
        # Here we test for downloading of "variant" files belonging to the `unet` and
        # the `text_encoder`. Their checkpoints can be sharded.
        for use_safetensors in [True, False]:
            for variant in ["fp16", None]:
                with tempfile.TemporaryDirectory() as tmpdirname:
                    tmpdirname = DiffusionPipeline.download(
                        "hf-internal-testing/tiny-stable-diffusion-pipe-variants-right-format",
                        safety_checker=None,
                        cache_dir=tmpdirname,
                        variant=variant,
                        use_safetensors=use_safetensors,
                    )

                    all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
                    files = [item for sublist in all_root_files for item in sublist]

                    # Check for `model_ext` and `variant`.
                    model_ext = ".safetensors" if use_safetensors else ".bin"
                    unexpected_ext = ".bin" if use_safetensors else ".safetensors"
                    model_files = [f for f in files if f.endswith(model_ext)]
                    assert not any(f.endswith(unexpected_ext) for f in files)
                    if variant is not None:
                        assert all(variant in f for f in model_files)

    def test_download_legacy_variants_with_sharded_ckpts_raises_warning(self):
        repo_id = "hf-internal-testing/tiny-stable-diffusion-pipe-variants-all-kinds"
        logger = logging.get_logger("diffusers.pipelines.pipeline_utils")
        deprecated_warning_msg = "Warning: The repository contains sharded checkpoints for variant"

        for is_local in [True, False]:
            with CaptureLogger(logger) as cap_logger:
                with tempfile.TemporaryDirectory() as tmpdirname:
                    local_repo_id = repo_id
                    if is_local:
                        local_repo_id = snapshot_download(repo_id, cache_dir=tmpdirname)

                    _ = DiffusionPipeline.from_pretrained(
                        local_repo_id,
                        safety_checker=None,
                        variant="fp16",
                        use_safetensors=True,
                    )
            assert deprecated_warning_msg in str(cap_logger), "Deprecation warning not found in logs"

    def test_download_safetensors_only_variant_exists_for_model(self):
        variant = None
        use_safetensors = True

        # the text encoder has no non-variant weights, so the following can't work
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(OSError) as error_context:
                _ = StableDiffusionPipeline.from_pretrained(
                    "hf-internal-testing/stable-diffusion-broken-variants",
                    cache_dir=tmpdirname,
                    variant=variant,
                    use_safetensors=use_safetensors,
                )
            assert "Error no file name" in str(error_context.exception)

        # text encoder has fp16 variants so we can load it
        with tempfile.TemporaryDirectory() as tmpdirname:
            tmpdirname = StableDiffusionPipeline.download(
                "hf-internal-testing/stable-diffusion-broken-variants",
                use_safetensors=use_safetensors,
                cache_dir=tmpdirname,
                variant="fp16",
            )
            all_root_files = [t[-1] for t in os.walk(tmpdirname)]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a non-variant file even if we have some here:
            # https://huggingface.co/hf-internal-testing/stable-diffusion-broken-variants/tree/main/unet
            assert len(files) == 15, f"We should only download 15 files, not {len(files)}"

    def test_download_bin_only_variant_exists_for_model(self):
        variant = None
        use_safetensors = False

        # the text encoder has no non-variant weights, so the following can't work
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(OSError) as error_context:
                _ = StableDiffusionPipeline.from_pretrained(
                    "hf-internal-testing/stable-diffusion-broken-variants",
                    cache_dir=tmpdirname,
                    variant=variant,
                    use_safetensors=use_safetensors,
                )
            assert "Error no file name" in str(error_context.exception)

        # text encoder has fp16 variants so we can load it
        with tempfile.TemporaryDirectory() as tmpdirname:
            tmpdirname = StableDiffusionPipeline.download(
                "hf-internal-testing/stable-diffusion-broken-variants",
                use_safetensors=use_safetensors,
                cache_dir=tmpdirname,
                variant="fp16",
            )
            all_root_files = [t[-1] for t in os.walk(tmpdirname)]
            files = [item for sublist in all_root_files for item in sublist]
            # None of the downloaded files should be a non-variant file even if we have some here:
            # https://huggingface.co/hf-internal-testing/stable-diffusion-broken-variants/tree/main/unet
            assert len(files) == 15, f"We should only download 15 files, not {len(files)}"

    def test_download_safetensors_variant_does_not_exist_for_model(self):
        variant = "no_ema"
        use_safetensors = True

        # text encoder is missing no_ema variant weights, so the following can't work
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(OSError) as error_context:
                _ = StableDiffusionPipeline.from_pretrained(
                    "hf-internal-testing/stable-diffusion-broken-variants",
                    cache_dir=tmpdirname,
                    variant=variant,
                    use_safetensors=use_safetensors,
                )

            assert "Error no file name" in str(error_context.exception)

    def test_download_bin_variant_does_not_exist_for_model(self):
        variant = "no_ema"
        use_safetensors = False

        # text encoder is missing no_ema variant weights, so the following can't work
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(OSError) as error_context:
                _ = StableDiffusionPipeline.from_pretrained(
                    "hf-internal-testing/stable-diffusion-broken-variants",
                    cache_dir=tmpdirname,
                    variant=variant,
                    use_safetensors=use_safetensors,
                )
            assert "Error no file name" in str(error_context.exception)

    def test_local_save_load_index(self):
        prompt = "hello"
        for variant in [None, "fp16"]:
            for use_safe in [True, False]:
                pipe = StableDiffusionPipeline.from_pretrained(
                    "hf-internal-testing/tiny-stable-diffusion-pipe-indexes",
                    variant=variant,
                    use_safetensors=use_safe,
                    safety_checker=None,
                )
                pipe = pipe.to(torch_device)
                generator = torch.manual_seed(0)
                out = pipe(prompt, num_inference_steps=2, generator=generator, output_type="np").images

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pipe.save_pretrained(tmpdirname, variant=variant, safe_serialization=use_safe)
                    pipe_2 = StableDiffusionPipeline.from_pretrained(
                        tmpdirname, use_safetensors=use_safe, variant=variant
                    )
                    pipe_2 = pipe_2.to(torch_device)

                generator = torch.manual_seed(0)

                out_2 = pipe_2(prompt, num_inference_steps=2, generator=generator, output_type="np").images

                assert np.max(np.abs(out - out_2)) < 1e-3

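    # `load_textual_inversion` accepts several formats, all exercised below: a
    # diffusers-style dict `{token: tensor}`, an A1111-style dict
    # `{"string_to_param": {"*": tensor}, "name": token}`, raw state dicts or
    # lists of them, and local folders/files with an optional `weight_name`.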
    def test_text_inversion_download(self):
        pipe = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe = pipe.to(torch_device)

        num_tokens = len(pipe.tokenizer)

        # single token load local
        with tempfile.TemporaryDirectory() as tmpdirname:
            ten = {"<*>": torch.ones((32,))}
            torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin"))

            pipe.load_textual_inversion(tmpdirname)

            token = pipe.tokenizer.convert_tokens_to_ids("<*>")
            assert token == num_tokens, "Added token must be at spot `num_tokens`"
            assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 32
            assert pipe._maybe_convert_prompt("<*>", pipe.tokenizer) == "<*>"

            prompt = "hey <*>"
            out = pipe(prompt, num_inference_steps=1, output_type="np").images
            assert out.shape == (1, 128, 128, 3)

        # single token load local with weight name
        with tempfile.TemporaryDirectory() as tmpdirname:
            ten = {"<**>": 2 * torch.ones((1, 32))}
            torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin"))

            pipe.load_textual_inversion(tmpdirname, weight_name="learned_embeds.bin")

            token = pipe.tokenizer.convert_tokens_to_ids("<**>")
            assert token == num_tokens + 1, "Added token must be at spot `num_tokens + 1`"
            assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64
            assert pipe._maybe_convert_prompt("<**>", pipe.tokenizer) == "<**>"

            prompt = "hey <**>"
            out = pipe(prompt, num_inference_steps=1, output_type="np").images
            assert out.shape == (1, 128, 128, 3)

        # multi token load
        with tempfile.TemporaryDirectory() as tmpdirname:
            ten = {"<***>": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))])}
            torch.save(ten, os.path.join(tmpdirname, "learned_embeds.bin"))

            pipe.load_textual_inversion(tmpdirname)

            token = pipe.tokenizer.convert_tokens_to_ids("<***>")
            token_1 = pipe.tokenizer.convert_tokens_to_ids("<***>_1")
            token_2 = pipe.tokenizer.convert_tokens_to_ids("<***>_2")

            assert token == num_tokens + 2, "Added token must be at spot `num_tokens + 2`"
            assert token_1 == num_tokens + 3, "Added token must be at spot `num_tokens + 3`"
            assert token_2 == num_tokens + 4, "Added token must be at spot `num_tokens + 4`"
            assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96
            assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128
            assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160
            assert pipe._maybe_convert_prompt("<***>", pipe.tokenizer) == "<***> <***>_1 <***>_2"

            prompt = "hey <***>"
            out = pipe(prompt, num_inference_steps=1, output_type="np").images
            assert out.shape == (1, 128, 128, 3)

        # multi token load a1111
        with tempfile.TemporaryDirectory() as tmpdirname:
            ten = {
                "string_to_param": {
                    "*": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))])
                },
                "name": "<****>",
            }
            torch.save(ten, os.path.join(tmpdirname, "a1111.bin"))

            pipe.load_textual_inversion(tmpdirname, weight_name="a1111.bin")

            token = pipe.tokenizer.convert_tokens_to_ids("<****>")
            token_1 = pipe.tokenizer.convert_tokens_to_ids("<****>_1")
            token_2 = pipe.tokenizer.convert_tokens_to_ids("<****>_2")

            assert token == num_tokens + 5, "Added token must be at spot `num_tokens + 5`"
            assert token_1 == num_tokens + 6, "Added token must be at spot `num_tokens + 6`"
            assert token_2 == num_tokens + 7, "Added token must be at spot `num_tokens + 7`"
            assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96
            assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128
            assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160
            assert pipe._maybe_convert_prompt("<****>", pipe.tokenizer) == "<****> <****>_1 <****>_2"

            prompt = "hey <****>"
            out = pipe(prompt, num_inference_steps=1, output_type="np").images
            assert out.shape == (1, 128, 128, 3)

        # multi embedding load
        with tempfile.TemporaryDirectory() as tmpdirname1:
            with tempfile.TemporaryDirectory() as tmpdirname2:
                ten = {"<*****>": torch.ones((32,))}
                torch.save(ten, os.path.join(tmpdirname1, "learned_embeds.bin"))

                ten = {"<******>": 2 * torch.ones((1, 32))}
                torch.save(ten, os.path.join(tmpdirname2, "learned_embeds.bin"))

                pipe.load_textual_inversion([tmpdirname1, tmpdirname2])

                token = pipe.tokenizer.convert_tokens_to_ids("<*****>")
                assert token == num_tokens + 8, "Added token must be at spot `num_tokens + 8`"
                assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 32
                assert pipe._maybe_convert_prompt("<*****>", pipe.tokenizer) == "<*****>"

                token = pipe.tokenizer.convert_tokens_to_ids("<******>")
                assert token == num_tokens + 9, "Added token must be at spot `num_tokens + 9`"
                assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64
                assert pipe._maybe_convert_prompt("<******>", pipe.tokenizer) == "<******>"

                prompt = "hey <*****> <******>"
                out = pipe(prompt, num_inference_steps=1, output_type="np").images
                assert out.shape == (1, 128, 128, 3)

        # single token state dict load
        ten = {"<x>": torch.ones((32,))}
        pipe.load_textual_inversion(ten)

        token = pipe.tokenizer.convert_tokens_to_ids("<x>")
        assert token == num_tokens + 10, "Added token must be at spot `num_tokens + 10`"
        assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 32
        assert pipe._maybe_convert_prompt("<x>", pipe.tokenizer) == "<x>"

        prompt = "hey <x>"
        out = pipe(prompt, num_inference_steps=1, output_type="np").images
        assert out.shape == (1, 128, 128, 3)

        # multi embedding state dict load
        ten1 = {"<xxxxx>": torch.ones((32,))}
        ten2 = {"<xxxxxx>": 2 * torch.ones((1, 32))}

        pipe.load_textual_inversion([ten1, ten2])

        token = pipe.tokenizer.convert_tokens_to_ids("<xxxxx>")
        assert token == num_tokens + 11, "Added token must be at spot `num_tokens + 11`"
        assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 32
        assert pipe._maybe_convert_prompt("<xxxxx>", pipe.tokenizer) == "<xxxxx>"

        token = pipe.tokenizer.convert_tokens_to_ids("<xxxxxx>")
        assert token == num_tokens + 12, "Added token must be at spot `num_tokens + 12`"
        assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 64
        assert pipe._maybe_convert_prompt("<xxxxxx>", pipe.tokenizer) == "<xxxxxx>"

        prompt = "hey <xxxxx> <xxxxxx>"
        out = pipe(prompt, num_inference_steps=1, output_type="np").images
        assert out.shape == (1, 128, 128, 3)

        # auto1111 multi-token state dict load
        ten = {
            "string_to_param": {
                "*": torch.cat([3 * torch.ones((1, 32)), 4 * torch.ones((1, 32)), 5 * torch.ones((1, 32))])
            },
            "name": "<xxxx>",
        }

        pipe.load_textual_inversion(ten)

        token = pipe.tokenizer.convert_tokens_to_ids("<xxxx>")
        token_1 = pipe.tokenizer.convert_tokens_to_ids("<xxxx>_1")
        token_2 = pipe.tokenizer.convert_tokens_to_ids("<xxxx>_2")

        assert token == num_tokens + 13, "Added token must be at spot `num_tokens + 13`"
        assert token_1 == num_tokens + 14, "Added token must be at spot `num_tokens + 14`"
        assert token_2 == num_tokens + 15, "Added token must be at spot `num_tokens + 15`"
        assert pipe.text_encoder.get_input_embeddings().weight[-3].sum().item() == 96
        assert pipe.text_encoder.get_input_embeddings().weight[-2].sum().item() == 128
        assert pipe.text_encoder.get_input_embeddings().weight[-1].sum().item() == 160
        assert pipe._maybe_convert_prompt("<xxxx>", pipe.tokenizer) == "<xxxx> <xxxx>_1 <xxxx>_2"

        prompt = "hey <xxxx>"
        out = pipe(prompt, num_inference_steps=1, output_type="np").images
        assert out.shape == (1, 128, 128, 3)

        # multiple references to multi embedding
        ten = {"<cat>": torch.ones(3, 32)}
        pipe.load_textual_inversion(ten)

        assert (
            pipe._maybe_convert_prompt("<cat> <cat>", pipe.tokenizer) == "<cat> <cat>_1 <cat>_2 <cat> <cat>_1 <cat>_2"
        )

        prompt = "hey <cat> <cat>"
        out = pipe(prompt, num_inference_steps=1, output_type="np").images
        assert out.shape == (1, 128, 128, 3)

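    # The same two embeddings can be loaded one at a time, as a list, or as a
    # stacked tensor; all three paths must produce identical tokenizer and
    # embedding state.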
    def test_text_inversion_multi_tokens(self):
        pipe1 = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe1 = pipe1.to(torch_device)

        token1, token2 = "<*>", "<**>"
        ten1 = torch.ones((32,))
        ten2 = torch.ones((32,)) * 2

        num_tokens = len(pipe1.tokenizer)

        pipe1.load_textual_inversion(ten1, token=token1)
        pipe1.load_textual_inversion(ten2, token=token2)
        emb1 = pipe1.text_encoder.get_input_embeddings().weight

        pipe2 = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe2 = pipe2.to(torch_device)
        pipe2.load_textual_inversion([ten1, ten2], token=[token1, token2])
        emb2 = pipe2.text_encoder.get_input_embeddings().weight

        pipe3 = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe3 = pipe3.to(torch_device)
        pipe3.load_textual_inversion(torch.stack([ten1, ten2], dim=0), token=[token1, token2])
        emb3 = pipe3.text_encoder.get_input_embeddings().weight

        assert len(pipe1.tokenizer) == len(pipe2.tokenizer) == len(pipe3.tokenizer) == num_tokens + 2
        assert (
            pipe1.tokenizer.convert_tokens_to_ids(token1)
            == pipe2.tokenizer.convert_tokens_to_ids(token1)
            == pipe3.tokenizer.convert_tokens_to_ids(token1)
            == num_tokens
        )
        assert (
            pipe1.tokenizer.convert_tokens_to_ids(token2)
            == pipe2.tokenizer.convert_tokens_to_ids(token2)
            == pipe3.tokenizer.convert_tokens_to_ids(token2)
            == num_tokens + 1
        )
        assert emb1[num_tokens].sum().item() == emb2[num_tokens].sum().item() == emb3[num_tokens].sum().item()
        assert (
            emb1[num_tokens + 1].sum().item() == emb2[num_tokens + 1].sum().item() == emb3[num_tokens + 1].sum().item()
        )

    def test_textual_inversion_unload(self):
        pipe1 = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe1 = pipe1.to(torch_device)
        orig_tokenizer_size = len(pipe1.tokenizer)
        orig_emb_size = len(pipe1.text_encoder.get_input_embeddings().weight)

        token = "<*>"
        ten = torch.ones((32,))
        pipe1.load_textual_inversion(ten, token=token)
        pipe1.unload_textual_inversion()
        pipe1.load_textual_inversion(ten, token=token)
        pipe1.unload_textual_inversion()

        final_tokenizer_size = len(pipe1.tokenizer)
        final_emb_size = len(pipe1.text_encoder.get_input_embeddings().weight)
        # both should be restored to original size
        assert final_tokenizer_size == orig_tokenizer_size
        assert final_emb_size == orig_emb_size

    def test_download_ignore_files(self):
        # Check https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe-ignore-files/blob/72f58636e5508a218c6b3f60550dc96445547817/model_index.json#L4
        with tempfile.TemporaryDirectory() as tmpdirname:
            # the repo's model_index.json lists files to skip under `_ignore_files`
            tmpdirname = DiffusionPipeline.download(
                "hf-internal-testing/tiny-stable-diffusion-pipe-ignore-files", cache_dir=tmpdirname
            )
            all_root_files = [t[-1] for t in os.walk(os.path.join(tmpdirname))]
            files = [item for sublist in all_root_files for item in sublist]

            # None of the files listed under `_ignore_files` should have been downloaded
            assert not any(f in ["vae/diffusion_pytorch_model.bin", "text_encoder/config.json"] for f in files)
            assert len(files) == 14

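    # DDUF archives cannot currently be combined with custom or connected
    # pipelines; both combinations must raise `NotImplementedError`.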
    def test_download_dduf_with_custom_pipeline_raises_error(self):
        with self.assertRaises(NotImplementedError):
            _ = DiffusionPipeline.download(
                "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", custom_pipeline="my_pipeline"
            )

    def test_download_dduf_with_connected_pipeline_raises_error(self):
        with self.assertRaises(NotImplementedError):
            _ = DiffusionPipeline.download(
                "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", load_connected_pipeline=True
            )

    def test_get_pipeline_class_from_flax(self):
        flax_config = {"_class_name": "FlaxStableDiffusionPipeline"}
        config = {"_class_name": "StableDiffusionPipeline"}

        # when loading a PyTorch Pipeline from a FlaxPipeline `model_index.json`, e.g.: https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-lms-pipe/blob/7a9063578b325779f0f1967874a6771caa973cad/model_index.json#L2
        # we need to make sure that we don't load the Flax Pipeline class, but instead the PyTorch pipeline class
        assert _get_pipeline_class(DiffusionPipeline, flax_config) == _get_pipeline_class(DiffusionPipeline, config)


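# Tests for loading custom pipeline classes: from the Hub, from GitHub community
# pipelines ("one_step_unet"), from local files, and via `trust_remote_code`
# components.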
class CustomPipelineTests(unittest.TestCase):
    def test_load_custom_pipeline(self):
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
        )
        pipeline = pipeline.to(torch_device)
        # NOTE that `"CustomPipeline"` is not a class that is defined in this library, but solely on the Hub
        # under https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L24
        assert pipeline.__class__.__name__ == "CustomPipeline"

    def test_load_custom_github(self):
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", custom_revision="main"
        )

        # make sure that on "main" pipeline gives only ones because of: https://github.com/huggingface/diffusers/pull/1690
        with torch.no_grad():
            output = pipeline()

        assert output.numel() == output.sum()

        # hack since Python doesn't like overwriting modules: https://stackoverflow.com/questions/3105801/unload-a-module-in-python
        # Could in the future work with hashes instead.
        del sys.modules["diffusers_modules.git.one_step_unet"]

        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="one_step_unet", custom_revision="0.10.2"
        )
        with torch.no_grad():
            output = pipeline()

        assert output.numel() != output.sum()

        assert pipeline.__class__.__name__ == "UnetSchedulerOneForwardPipeline"

    def test_run_custom_pipeline(self):
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline="hf-internal-testing/diffusers-dummy-pipeline"
        )
        pipeline = pipeline.to(torch_device)
        images, output_str = pipeline(num_inference_steps=2, output_type="np")

        assert images[0].shape == (1, 32, 32, 3)

        # compare output to https://huggingface.co/hf-internal-testing/diffusers-dummy-pipeline/blob/main/pipeline.py#L102
        assert output_str == "This is a test"

    def test_remote_components(self):
        # make sure that trust remote code has to be passed
        with self.assertRaises(ValueError):
            pipeline = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sdxl-custom-components")

        # Check that only loading custom components "my_unet", "my_scheduler" works
        pipeline = DiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-sdxl-custom-components", trust_remote_code=True
        )

        assert pipeline.config.unet == ("diffusers_modules.local.my_unet_model", "MyUNetModel")
        assert pipeline.config.scheduler == ("diffusers_modules.local.my_scheduler", "MyScheduler")
        assert pipeline.__class__.__name__ == "StableDiffusionXLPipeline"

        pipeline = pipeline.to(torch_device)
        images = pipeline("test", num_inference_steps=2, output_type="np")[0]

        assert images.shape == (1, 64, 64, 3)

        # Check that only loading custom components "my_unet", "my_scheduler" and explicit custom pipeline works
        pipeline = DiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-sdxl-custom-components", custom_pipeline="my_pipeline", trust_remote_code=True
        )

        assert pipeline.config.unet == ("diffusers_modules.local.my_unet_model", "MyUNetModel")
        assert pipeline.config.scheduler == ("diffusers_modules.local.my_scheduler", "MyScheduler")
        assert pipeline.__class__.__name__ == "MyPipeline"

        pipeline = pipeline.to(torch_device)
        images = pipeline("test", num_inference_steps=2, output_type="np")[0]

        assert images.shape == (1, 64, 64, 3)

    def test_remote_auto_custom_pipe(self):
        # make sure that trust remote code has to be passed
        with self.assertRaises(ValueError):
            pipeline = DiffusionPipeline.from_pretrained("hf-internal-testing/tiny-sdxl-custom-all")

        # Check that only loading custom components "my_unet", "my_scheduler" and auto custom pipeline works
        pipeline = DiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-sdxl-custom-all", trust_remote_code=True
        )

        assert pipeline.config.unet == ("diffusers_modules.local.my_unet_model", "MyUNetModel")
        assert pipeline.config.scheduler == ("diffusers_modules.local.my_scheduler", "MyScheduler")
        assert pipeline.__class__.__name__ == "MyPipeline"

        pipeline = pipeline.to(torch_device)
        images = pipeline("test", num_inference_steps=2, output_type="np")[0]

        assert images.shape == (1, 64, 64, 3)

    def test_local_custom_pipeline_repo(self):
        local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline")
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path
        )
        pipeline = pipeline.to(torch_device)
        images, output_str = pipeline(num_inference_steps=2, output_type="np")

        assert pipeline.__class__.__name__ == "CustomLocalPipeline"
        assert images[0].shape == (1, 32, 32, 3)
        # compare to https://github.com/huggingface/diffusers/blob/main/tests/fixtures/custom_pipeline/pipeline.py#L102
        assert output_str == "This is a local test"

    def test_local_custom_pipeline_file(self):
        local_custom_pipeline_path = get_tests_dir("fixtures/custom_pipeline")
        local_custom_pipeline_path = os.path.join(local_custom_pipeline_path, "what_ever.py")
        pipeline = DiffusionPipeline.from_pretrained(
            "google/ddpm-cifar10-32", custom_pipeline=local_custom_pipeline_path
        )
        pipeline = pipeline.to(torch_device)
        images, output_str = pipeline(num_inference_steps=2, output_type="np")

        assert pipeline.__class__.__name__ == "CustomLocalPipeline"
        assert images[0].shape == (1, 32, 32, 3)
        # compare to https://github.com/huggingface/diffusers/blob/main/tests/fixtures/custom_pipeline/pipeline.py#L102
        assert output_str == "This is a local test"

    def test_custom_model_and_pipeline(self):
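        # Round-trip a pipeline built from custom (non-library) components and check that the
        # config survives save_pretrained/from_pretrained unchanged (up to `_name_or_path`).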
        pipe = CustomPipeline(
            encoder=CustomEncoder(),
            scheduler=DDIMScheduler(),
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname, safe_serialization=False)

            pipe_new = CustomPipeline.from_pretrained(tmpdirname)
            pipe_new.save_pretrained(tmpdirname)

        conf_1 = dict(pipe.config)
        conf_2 = dict(pipe_new.config)

        del conf_2["_name_or_path"]

        assert conf_1 == conf_2

    @slow
    @require_torch_accelerator
    def test_download_from_git(self):
        # adaptive_avg_pool2d_backward_cuda does not have a deterministic
        # implementation, so this test cannot run fully deterministically.
        clip_model_id = "laion/CLIP-ViT-B-32-laion2B-s34B-b79K"

        feature_extractor = CLIPImageProcessor.from_pretrained(clip_model_id)
        clip_model = CLIPModel.from_pretrained(clip_model_id, torch_dtype=torch.float16)

        pipeline = DiffusionPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            custom_pipeline="clip_guided_stable_diffusion",
            clip_model=clip_model,
            feature_extractor=feature_extractor,
            torch_dtype=torch.float16,
        )
        pipeline.enable_attention_slicing()
        pipeline = pipeline.to(torch_device)

        # NOTE that `"CLIPGuidedStableDiffusion"` is not a class defined in the PyPI package of the library; it lives solely in the community examples folder of GitHub under:
        # https://github.com/huggingface/diffusers/blob/main/examples/community/clip_guided_stable_diffusion.py
        assert pipeline.__class__.__name__ == "CLIPGuidedStableDiffusion"

        image = pipeline("a prompt", num_inference_steps=2, output_type="np").images[0]
        assert image.shape == (512, 512, 3)

    def test_save_pipeline_change_config(self):
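        # The scheduler class recorded in `model_index.json` should track scheduler swaps made at
        # runtime, so a save/load round-trip restores the swapped-in scheduler.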
        pipe = DiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = DiffusionPipeline.from_pretrained(tmpdirname)

            assert pipe.scheduler.__class__.__name__ == "PNDMScheduler"

        # let's make sure that changing the scheduler is correctly reflected
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
            pipe.save_pretrained(tmpdirname)
            pipe = DiffusionPipeline.from_pretrained(tmpdirname)

            assert pipe.scheduler.__class__.__name__ == "DPMSolverMultistepScheduler"


class PipelineFastTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        torch.cuda.empty_cache()

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    def dummy_uncond_unet(self, sample_size=32):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=sample_size,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def dummy_cond_unet(self, sample_size=32):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=sample_size,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        return CLIPTextModel(config)

    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    @parameterized.expand(
        [
            [DDIMScheduler, DDIMPipeline, 32],
            [DDPMScheduler, DDPMPipeline, 32],
            [DDIMScheduler, DDIMPipeline, (32, 64)],
            [DDPMScheduler, DDPMPipeline, (64, 32)],
        ]
    )
    def test_uncond_unet_components(self, scheduler_fn=DDPMScheduler, pipeline_fn=DDPMPipeline, sample_size=32):
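        # Parameterized over square and rectangular sample sizes to check that the output image
        # shape follows the UNet's `sample_size`.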
        unet = self.dummy_uncond_unet(sample_size)
        scheduler = scheduler_fn()
        pipeline = pipeline_fn(unet, scheduler).to(torch_device)

        generator = torch.manual_seed(0)
        out_image = pipeline(
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images
        sample_size = (sample_size, sample_size) if isinstance(sample_size, int) else sample_size
        assert out_image.shape == (1, *sample_size, 3)

    def test_stable_diffusion_components(self):
        """Test that components property works correctly"""
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image().cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB")
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((32, 32))

        # make sure here that the PNDM scheduler skips the PRK steps
        inpaint = StableDiffusionInpaintPipelineLegacy(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        ).to(torch_device)
        img2img = StableDiffusionImg2ImgPipeline(**inpaint.components, image_encoder=None).to(torch_device)
        text2img = StableDiffusionPipeline(**inpaint.components, image_encoder=None).to(torch_device)

        prompt = "A painting of a squirrel eating a burger"

        generator = torch.manual_seed(0)
        image_inpaint = inpaint(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            mask_image=mask_image,
        ).images
        image_img2img = img2img(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images
        image_text2img = text2img(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        assert image_inpaint.shape == (1, 32, 32, 3)
        assert image_img2img.shape == (1, 32, 32, 3)
        assert image_text2img.shape == (1, 64, 64, 3)

    @require_torch_accelerator
    def test_pipe_false_offload_warn(self):
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        sd.enable_model_cpu_offload(device=torch_device)

        logger = logging.get_logger("diffusers.pipelines.pipeline_utils")
        with CaptureLogger(logger) as cap_logger:
            sd.to(torch_device)

        assert "It is strongly recommended against doing so" in str(cap_logger)

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

    def test_set_scheduler(self):
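        # Schedulers can be hot-swapped on a live pipeline; each replacement is built from the
        # incumbent scheduler's config so that shared hyperparameters carry over.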
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        sd.scheduler = DDIMScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, DDIMScheduler)
        sd.scheduler = DDPMScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, DDPMScheduler)
        sd.scheduler = PNDMScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, PNDMScheduler)
        sd.scheduler = LMSDiscreteScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, LMSDiscreteScheduler)
        sd.scheduler = EulerDiscreteScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, EulerDiscreteScheduler)
        sd.scheduler = EulerAncestralDiscreteScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, EulerAncestralDiscreteScheduler)
        sd.scheduler = DPMSolverMultistepScheduler.from_config(sd.scheduler.config)
        assert isinstance(sd.scheduler, DPMSolverMultistepScheduler)

    def test_set_component_to_none(self):
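        # Setting an optional component (here the feature extractor) to None after construction
        # must not change the pipeline output.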
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        pipeline = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        generator = torch.Generator(device="cpu").manual_seed(0)

        prompt = "This is a flower"

        out_image = pipeline(
            prompt=prompt,
            generator=generator,
            num_inference_steps=1,
            output_type="np",
        ).images

        pipeline.feature_extractor = None
        generator = torch.Generator(device="cpu").manual_seed(0)
        out_image_2 = pipeline(
            prompt=prompt,
            generator=generator,
            num_inference_steps=1,
            output_type="np",
        ).images

        assert out_image.shape == (1, 64, 64, 3)
        assert np.abs(out_image - out_image_2).max() < 1e-3

    def test_optional_components_is_none(self):
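        # An optional component omitted at construction time should still appear in
        # `pipeline.components` (as None), so `components` can be splatted into a sibling pipeline.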
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        items = {
            "feature_extractor": self.dummy_extractor,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": bert,
            "tokenizer": tokenizer,
            "safety_checker": None,
            # we don't add an image encoder
        }

        pipeline = StableDiffusionPipeline(**items)

        assert sorted(pipeline.components.keys()) == sorted(["image_encoder"] + list(items.keys()))
        assert pipeline.image_encoder is None

    def test_set_scheduler_consistency(self):
        unet = self.dummy_cond_unet()
        pndm = PNDMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler")
        ddim = DDIMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler")
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=pndm,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        pndm_config = sd.scheduler.config
        sd.scheduler = DDPMScheduler.from_config(pndm_config)
        sd.scheduler = PNDMScheduler.from_config(sd.scheduler.config)
        pndm_config_2 = sd.scheduler.config
        pndm_config_2 = {k: v for k, v in pndm_config_2.items() if k in pndm_config}

        assert dict(pndm_config) == dict(pndm_config_2)

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=ddim,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        ddim_config = sd.scheduler.config
        sd.scheduler = LMSDiscreteScheduler.from_config(ddim_config)
        sd.scheduler = DDIMScheduler.from_config(sd.scheduler.config)
        ddim_config_2 = sd.scheduler.config
        ddim_config_2 = {k: v for k, v in ddim_config_2.items() if k in ddim_config}

        assert dict(ddim_config) == dict(ddim_config_2)

    def test_save_safe_serialization(self):
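        # With `safe_serialization=True`, each component is written to its own subfolder as a
        # `.safetensors` file that `safetensors.torch.load_file` must be able to parse.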
        pipeline = StableDiffusionPipeline.from_pretrained("hf-internal-testing/tiny-stable-diffusion-torch")
        with tempfile.TemporaryDirectory() as tmpdirname:
            pipeline.save_pretrained(tmpdirname, safe_serialization=True)

            # Validate that the VAE safetensors file exists and is of the correct format
            vae_path = os.path.join(tmpdirname, "vae", "diffusion_pytorch_model.safetensors")
            assert os.path.exists(vae_path), f"Could not find {vae_path}"
            _ = safetensors.torch.load_file(vae_path)

            # Validate that the UNet safetensors file exists and is of the correct format
            unet_path = os.path.join(tmpdirname, "unet", "diffusion_pytorch_model.safetensors")
            assert os.path.exists(unet_path), f"Could not find {unet_path}"
            _ = safetensors.torch.load_file(unet_path)

            # Validate that the text encoder safetensors file exists and is of the correct format
            text_encoder_path = os.path.join(tmpdirname, "text_encoder", "model.safetensors")
            assert os.path.exists(text_encoder_path), f"Could not find {text_encoder_path}"
            _ = safetensors.torch.load_file(text_encoder_path)

            pipeline = StableDiffusionPipeline.from_pretrained(tmpdirname)
            assert pipeline.unet is not None
            assert pipeline.vae is not None
            assert pipeline.text_encoder is not None
            assert pipeline.scheduler is not None
            assert pipeline.feature_extractor is not None

    def test_no_pytorch_download_when_doing_safetensors(self):
        # by default we don't download the PyTorch .bin weights when safetensors are available
        with tempfile.TemporaryDirectory() as tmpdirname:
            _ = StableDiffusionPipeline.from_pretrained(
                "hf-internal-testing/diffusers-stable-diffusion-tiny-all", cache_dir=tmpdirname
            )

            path = os.path.join(
                tmpdirname,
                "models--hf-internal-testing--diffusers-stable-diffusion-tiny-all",
                "snapshots",
                "07838d72e12f9bcec1375b0482b80c1d399be843",
                "unet",
            )
            # safetensors exists
            assert os.path.exists(os.path.join(path, "diffusion_pytorch_model.safetensors"))
            # pytorch does not
            assert not os.path.exists(os.path.join(path, "diffusion_pytorch_model.bin"))

    def test_no_safetensors_download_when_doing_pytorch(self):
        use_safetensors = False

        with tempfile.TemporaryDirectory() as tmpdirname:
            _ = StableDiffusionPipeline.from_pretrained(
                "hf-internal-testing/diffusers-stable-diffusion-tiny-all",
                cache_dir=tmpdirname,
                use_safetensors=use_safetensors,
            )

            path = os.path.join(
                tmpdirname,
                "models--hf-internal-testing--diffusers-stable-diffusion-tiny-all",
                "snapshots",
                "07838d72e12f9bcec1375b0482b80c1d399be843",
                "unet",
            )
            # safetensors does not exist
            assert not os.path.exists(os.path.join(path, "diffusion_pytorch_model.safetensors"))
            # pytorch does
            assert os.path.exists(os.path.join(path, "diffusion_pytorch_model.bin"))

    def test_optional_components(self):
        unet = self.dummy_cond_unet()
        pndm = PNDMScheduler.from_config("hf-internal-testing/tiny-stable-diffusion-torch", subfolder="scheduler")
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        orig_sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=pndm,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=unet,
            feature_extractor=self.dummy_extractor,
        )
        sd = orig_sd

        assert sd.config.requires_safety_checker is True

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)

            # Test that passing None works
            sd = StableDiffusionPipeline.from_pretrained(
                tmpdirname, feature_extractor=None, safety_checker=None, requires_safety_checker=False
            )

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor == (None, None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)

            # Test that loading previous None works
            sd = StableDiffusionPipeline.from_pretrained(tmpdirname)

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor == (None, None)

            orig_sd.save_pretrained(tmpdirname)

            # Test that loading without any directory works
            shutil.rmtree(os.path.join(tmpdirname, "safety_checker"))
            with open(os.path.join(tmpdirname, sd.config_name)) as f:
                config = json.load(f)
                config["safety_checker"] = [None, None]
            with open(os.path.join(tmpdirname, sd.config_name), "w") as f:
                json.dump(config, f)

            sd = StableDiffusionPipeline.from_pretrained(tmpdirname, requires_safety_checker=False)
            sd.save_pretrained(tmpdirname)
            sd = StableDiffusionPipeline.from_pretrained(tmpdirname)

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor == (None, None)

            # Test that loading from deleted model index works
            with open(os.path.join(tmpdirname, sd.config_name)) as f:
                config = json.load(f)
                del config["safety_checker"]
                del config["feature_extractor"]
            with open(os.path.join(tmpdirname, sd.config_name), "w") as f:
                json.dump(config, f)

            sd = StableDiffusionPipeline.from_pretrained(tmpdirname)

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor == (None, None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)

            # Test that partially loading works
            sd = StableDiffusionPipeline.from_pretrained(tmpdirname, feature_extractor=self.dummy_extractor)

            assert sd.config.requires_safety_checker is False
            assert sd.config.safety_checker == (None, None)
            assert sd.config.feature_extractor != (None, None)

            # Test that partially loading works
            sd = StableDiffusionPipeline.from_pretrained(
                tmpdirname,
                feature_extractor=self.dummy_extractor,
                safety_checker=unet,
                requires_safety_checker=[True, True],
            )

            assert sd.config.requires_safety_checker == [True, True]
            assert sd.config.safety_checker != (None, None)
            assert sd.config.feature_extractor != (None, None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)
            sd = StableDiffusionPipeline.from_pretrained(tmpdirname, feature_extractor=self.dummy_extractor)

            assert sd.config.requires_safety_checker == [True, True]
            assert sd.config.safety_checker != (None, None)
            assert sd.config.feature_extractor != (None, None)

    def test_name_or_path(self):
        model_path = "hf-internal-testing/tiny-stable-diffusion-torch"
        sd = DiffusionPipeline.from_pretrained(model_path)

        assert sd.name_or_path == model_path

        with tempfile.TemporaryDirectory() as tmpdirname:
            sd.save_pretrained(tmpdirname)
            sd = DiffusionPipeline.from_pretrained(tmpdirname)

            assert sd.name_or_path == tmpdirname

    def test_error_no_variant_available(self):
        variant = "fp16"
        with self.assertRaises(ValueError) as error_context:
            _ = StableDiffusionPipeline.from_pretrained(
                "hf-internal-testing/diffusers-stable-diffusion-tiny-all", variant=variant
            )

        assert "but no such modeling files are available" in str(error_context.exception)
        assert variant in str(error_context.exception)

    def test_pipe_to(self):
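        # `DiffusionPipeline.to()` accepts the device/dtype positionally or as keywords, plus the
        # legacy `torch_device`/`torch_dtype` aliases; every spelling must behave identically.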
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        device_type = torch.device(torch_device).type

        sd1 = sd.to(device_type)
        sd2 = sd.to(torch.device(device_type))
        sd3 = sd.to(device_type, torch.float32)
        sd4 = sd.to(device=device_type)
        sd5 = sd.to(torch_device=device_type)
        sd6 = sd.to(device_type, dtype=torch.float32)
        sd7 = sd.to(device_type, torch_dtype=torch.float32)

        assert sd1.device.type == device_type
        assert sd2.device.type == device_type
        assert sd3.device.type == device_type
        assert sd4.device.type == device_type
        assert sd5.device.type == device_type
        assert sd6.device.type == device_type
        assert sd7.device.type == device_type

        sd1 = sd.to(torch.float16)
        sd2 = sd.to(None, torch.float16)
        sd3 = sd.to(dtype=torch.float16)
        sd4 = sd.to(torch_dtype=torch.float16)
        sd5 = sd.to(None, dtype=torch.float16)
        sd6 = sd.to(None, torch_dtype=torch.float16)

        assert sd1.dtype == torch.float16
        assert sd2.dtype == torch.float16
        assert sd3.dtype == torch.float16
        assert sd4.dtype == torch.float16
        assert sd5.dtype == torch.float16
        assert sd6.dtype == torch.float16

        sd1 = sd.to(device=device_type, dtype=torch.float16)
        sd2 = sd.to(torch_device=device_type, torch_dtype=torch.float16)
        sd3 = sd.to(device_type, torch.float16)

        assert sd1.dtype == torch.float16
        assert sd2.dtype == torch.float16
        assert sd3.dtype == torch.float16

        assert sd1.device.type == device_type
        assert sd2.device.type == device_type
        assert sd3.device.type == device_type

    def test_pipe_same_device_id_offload(self):
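        # Freeing the offload hooks must preserve the GPU id configured via
        # `enable_model_cpu_offload(gpu_id=...)`.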
        unet = self.dummy_cond_unet()
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        sd = StableDiffusionPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )

        sd.enable_model_cpu_offload(gpu_id=5)
        assert sd._offload_gpu_id == 5
        sd.maybe_free_model_hooks()
        assert sd._offload_gpu_id == 5

    @parameterized.expand([torch.float32, torch.float16])
    @require_hf_hub_version_greater("0.26.5")
    @require_transformers_version_greater("4.47.1")
    def test_load_dduf_from_hub(self, dtype):
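        # A DDUF archive bundles a whole pipeline into a single file; loading it and re-saving in
        # the regular folder layout should reproduce the same outputs.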
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe = DiffusionPipeline.from_pretrained(
                "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", cache_dir=tmpdir, torch_dtype=dtype
            ).to(torch_device)
            out_1 = pipe(prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np").images

            pipe.save_pretrained(tmpdir)
            loaded_pipe = DiffusionPipeline.from_pretrained(tmpdir, torch_dtype=dtype).to(torch_device)

            out_2 = loaded_pipe(
                prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np"
            ).images

        self.assertTrue(np.allclose(out_1, out_2, atol=1e-4, rtol=1e-4))

    @require_hf_hub_version_greater("0.26.5")
    @require_transformers_version_greater("4.47.1")
    def test_load_dduf_from_hub_local_files_only(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe = DiffusionPipeline.from_pretrained(
                "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", cache_dir=tmpdir
            ).to(torch_device)
            out_1 = pipe(prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np").images

            local_files_pipe = DiffusionPipeline.from_pretrained(
                "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", cache_dir=tmpdir, local_files_only=True
            ).to(torch_device)
            out_2 = local_files_pipe(
                prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np"
            ).images

        self.assertTrue(np.allclose(out_1, out_2, atol=1e-4, rtol=1e-4))

    def test_dduf_raises_error_with_custom_pipeline(self):
        with self.assertRaises(NotImplementedError):
            _ = DiffusionPipeline.from_pretrained(
                "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", custom_pipeline="my_pipeline"
            )

    def test_dduf_raises_error_with_connected_pipeline(self):
        with self.assertRaises(NotImplementedError):
            _ = DiffusionPipeline.from_pretrained(
                "DDUF/tiny-flux-dev-pipe-dduf", dduf_file="fluxpipeline.dduf", load_connected_pipeline=True
            )

    def test_wrong_model(self):
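        # Passing a component of the wrong type (a tokenizer as `text_encoder`) must raise a
        # ValueError that names the expected type.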
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        with self.assertRaises(ValueError) as error_context:
            _ = StableDiffusionPipeline.from_pretrained(
                "hf-internal-testing/diffusers-stable-diffusion-tiny-all", text_encoder=tokenizer
            )

        assert "is of type" in str(error_context.exception)
        assert "but should be" in str(error_context.exception)

    @require_hf_hub_version_greater("0.26.5")
    @require_transformers_version_greater("4.47.1")
    def test_dduf_load_sharded_checkpoint_diffusion_model(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            pipe = DiffusionPipeline.from_pretrained(
                "hf-internal-testing/tiny-flux-dev-pipe-sharded-checkpoint-DDUF",
                dduf_file="tiny-flux-dev-pipe-sharded-checkpoint.dduf",
                cache_dir=tmpdir,
            ).to(torch_device)

            out_1 = pipe(prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np").images

            pipe.save_pretrained(tmpdir)
            loaded_pipe = DiffusionPipeline.from_pretrained(tmpdir).to(torch_device)

            out_2 = loaded_pipe(
                prompt="dog", num_inference_steps=5, generator=torch.manual_seed(0), output_type="np"
            ).images

        self.assertTrue(np.allclose(out_1, out_2, atol=1e-4, rtol=1e-4))


@slow
@require_torch_accelerator
class PipelineSlowTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def test_smart_download(self):
        model_id = "hf-internal-testing/unet-pipeline-dummy"
        with tempfile.TemporaryDirectory() as tmpdirname:
            _ = DiffusionPipeline.from_pretrained(model_id, cache_dir=tmpdirname, force_download=True)
            local_repo_name = "--".join(["models"] + model_id.split("/"))
            snapshot_dir = os.path.join(tmpdirname, local_repo_name, "snapshots")
            snapshot_dir = os.path.join(snapshot_dir, os.listdir(snapshot_dir)[0])

            # inspect all downloaded files to make sure that everything is included
            assert os.path.isfile(os.path.join(snapshot_dir, DiffusionPipeline.config_name))
            assert os.path.isfile(os.path.join(snapshot_dir, CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, SCHEDULER_CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, WEIGHTS_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, "scheduler", SCHEDULER_CONFIG_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, "unet", WEIGHTS_NAME))
            assert os.path.isfile(os.path.join(snapshot_dir, "unet", WEIGHTS_NAME))
            # let's make sure the super large numpy file:
            # https://huggingface.co/hf-internal-testing/unet-pipeline-dummy/blob/main/big_array.npy
            # is not downloaded, while all the expected files above are
            assert not os.path.isfile(os.path.join(snapshot_dir, "big_array.npy"))

    def test_warning_unused_kwargs(self):
        model_id = "hf-internal-testing/unet-pipeline-dummy"
        logger = logging.get_logger("diffusers.pipelines")
        with tempfile.TemporaryDirectory() as tmpdirname:
            with CaptureLogger(logger) as cap_logger:
                DiffusionPipeline.from_pretrained(
                    model_id,
                    not_used=True,
                    cache_dir=tmpdirname,
                    force_download=True,
                )

        assert (
            cap_logger.out.strip().split("\n")[-1]
            == "Keyword arguments {'not_used': True} are not expected by DDPMPipeline and will be ignored."
        )

    def test_from_save_pretrained(self):
        # 1. Load models
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        scheduler = DDPMScheduler(num_train_timesteps=10)

        ddpm = DDPMPipeline(model, scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        with tempfile.TemporaryDirectory() as tmpdirname:
            ddpm.save_pretrained(tmpdirname)
            new_ddpm = DDPMPipeline.from_pretrained(tmpdirname)
            new_ddpm.to(torch_device)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = ddpm(generator=generator, num_inference_steps=5, output_type="np").images

        generator = torch.Generator(device=torch_device).manual_seed(0)
        new_image = new_ddpm(generator=generator, num_inference_steps=5, output_type="np").images

        assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"

    @is_torch_compile
    @require_torch_2
    @unittest.skipIf(
        get_python_version() == (3, 12),
        reason="Torch Dynamo isn't yet supported for Python 3.12.",
    )
    def test_from_save_pretrained_dynamo(self):
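        # torch.compile keeps process-global state, so the save/load round-trip of a compiled
        # pipeline is exercised in a fresh subprocess (see `_test_from_save_pretrained_dynamo`).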
        run_test_in_subprocess(test_case=self, target_func=_test_from_save_pretrained_dynamo, inputs=None)

    def test_from_pretrained_hub(self):
        model_path = "google/ddpm-cifar10-32"

        scheduler = DDPMScheduler(num_train_timesteps=10)

        ddpm = DDPMPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm = ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm_from_hub = ddpm_from_hub.to(torch_device)
        ddpm_from_hub.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = ddpm(generator=generator, num_inference_steps=5, output_type="np").images

        generator = torch.Generator(device=torch_device).manual_seed(0)
        new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="np").images

        assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"

    def test_from_pretrained_hub_pass_model(self):
        model_path = "google/ddpm-cifar10-32"

        scheduler = DDPMScheduler(num_train_timesteps=10)

        # pass unet into DiffusionPipeline
        unet = UNet2DModel.from_pretrained(model_path)
        ddpm_from_hub_custom_model = DiffusionPipeline.from_pretrained(model_path, unet=unet, scheduler=scheduler)
        ddpm_from_hub_custom_model = ddpm_from_hub_custom_model.to(torch_device)
        ddpm_from_hub_custom_model.set_progress_bar_config(disable=None)

        ddpm_from_hub = DiffusionPipeline.from_pretrained(model_path, scheduler=scheduler)
        ddpm_from_hub = ddpm_from_hub.to(torch_device)
        ddpm_from_hub.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = ddpm_from_hub_custom_model(generator=generator, num_inference_steps=5, output_type="np").images

        generator = torch.Generator(device=torch_device).manual_seed(0)
        new_image = ddpm_from_hub(generator=generator, num_inference_steps=5, output_type="np").images

        assert np.abs(image - new_image).max() < 1e-5, "Models don't give the same forward pass"

    def test_output_format(self):
        model_path = "google/ddpm-cifar10-32"

        scheduler = DDIMScheduler.from_pretrained(model_path)
        pipe = DDIMPipeline.from_pretrained(model_path, scheduler=scheduler)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        images = pipe(output_type="np").images
        assert images.shape == (1, 32, 32, 3)
        assert isinstance(images, np.ndarray)

        images = pipe(output_type="pil", num_inference_steps=4).images
        assert isinstance(images, list)
        assert len(images) == 1
        assert isinstance(images[0], PIL.Image.Image)

        # use PIL by default
        images = pipe(num_inference_steps=4).images
        assert isinstance(images, list)
        assert isinstance(images[0], PIL.Image.Image)

    @require_flax
    def test_from_flax_from_pt(self):
        pipe_pt = StableDiffusionPipeline.from_pretrained(
            "hf-internal-testing/tiny-stable-diffusion-torch", safety_checker=None
        )
        pipe_pt.to(torch_device)

        from diffusers import FlaxStableDiffusionPipeline

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe_pt.save_pretrained(tmpdirname)

            pipe_flax, params = FlaxStableDiffusionPipeline.from_pretrained(
                tmpdirname, safety_checker=None, from_pt=True
            )

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe_flax.save_pretrained(tmpdirname, params=params)
            pipe_pt_2 = StableDiffusionPipeline.from_pretrained(tmpdirname, safety_checker=None, from_flax=True)
            pipe_pt_2.to(torch_device)

        prompt = "Hello"

        generator = torch.manual_seed(0)
        image_0 = pipe_pt(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images[0]

        generator = torch.manual_seed(0)
        image_1 = pipe_pt_2(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images[0]

        assert np.abs(image_0 - image_1).sum() < 1e-5, "Models don't give the same forward pass"

    @require_compel
    def test_weighted_prompts_compel(self):
        from compel import Compel

        pipe = StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload(device=torch_device)
        pipe.enable_attention_slicing()

        compel = Compel(tokenizer=pipe.tokenizer, text_encoder=pipe.text_encoder)

        prompt = "a red cat playing with a ball{}"

        prompts = [prompt.format(s) for s in ["", "++", "--"]]

        prompt_embeds = compel(prompts)

        generator = [torch.Generator(device="cpu").manual_seed(33) for _ in range(prompt_embeds.shape[0])]

        images = pipe(
            prompt_embeds=prompt_embeds, generator=generator, num_inference_steps=20, output_type="np"
        ).images

        for i, image in enumerate(images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/compel/forest_{i}.npy"
            )

            assert np.abs(image - expected_image).max() < 3e-1


@nightly
@require_torch_accelerator
class PipelineNightlyTests(unittest.TestCase):
    def setUp(self):
        # clean up the VRAM before each test
        super().setUp()
        gc.collect()
        backend_empty_cache(torch_device)

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        backend_empty_cache(torch_device)

    def test_ddpm_ddim_equality_batched(self):
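        # With eta=1.0, the full 1000 inference steps, and clipped model outputs, DDIM sampling
        # should match DDPM sampling up to small numerical differences.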
        seed = 0
        model_id = "google/ddpm-cifar10-32"

        unet = UNet2DModel.from_pretrained(model_id)
        ddpm_scheduler = DDPMScheduler()
        ddim_scheduler = DDIMScheduler()

        ddpm = DDPMPipeline(unet=unet, scheduler=ddpm_scheduler)
        ddpm.to(torch_device)
        ddpm.set_progress_bar_config(disable=None)

        ddim = DDIMPipeline(unet=unet, scheduler=ddim_scheduler)
        ddim.to(torch_device)
        ddim.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(seed)
        ddpm_images = ddpm(batch_size=2, generator=generator, output_type="np").images

        generator = torch.Generator(device=torch_device).manual_seed(seed)
        ddim_images = ddim(
            batch_size=2,
            generator=generator,
            num_inference_steps=1000,
            eta=1.0,
            output_type="np",
            use_clipped_model_output=True,  # Need this to make DDIM match DDPM
        ).images

        # the values aren't exactly equal, but the images look the same visually
        assert np.abs(ddpm_images - ddim_images).max() < 1e-1