import functools
import glob
import importlib
import importlib.metadata
import inspect
import io
import logging
import multiprocessing
import os
import random
import re
import struct
import sys
import tempfile
import time
import unittest
import urllib.parse
from collections import UserDict
from contextlib import contextmanager
from io import BytesIO, StringIO
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Set, Tuple, Union

import numpy as np
import PIL.Image
import PIL.ImageOps
import requests
from numpy.linalg import norm
from packaging import version

from .constants import DIFFUSERS_REQUEST_TIMEOUT
from .import_utils import (
    BACKENDS_MAPPING,
    is_accelerate_available,
    is_bitsandbytes_available,
    is_compel_available,
    is_flax_available,
    is_gguf_available,
    is_kernels_available,
    is_note_seq_available,
    is_onnx_available,
    is_opencv_available,
    is_optimum_quanto_available,
    is_peft_available,
    is_timm_available,
    is_torch_available,
    is_torch_version,
    is_torchao_available,
    is_torchsde_available,
    is_transformers_available,
)
from .logging import get_logger


if is_torch_available():
    import torch

    IS_ROCM_SYSTEM = torch.version.hip is not None
    IS_CUDA_SYSTEM = torch.version.cuda is not None
    IS_XPU_SYSTEM = getattr(torch.version, "xpu", None) is not None
else:
    IS_ROCM_SYSTEM = False
    IS_CUDA_SYSTEM = False
    IS_XPU_SYSTEM = False

global_rng = random.Random()

logger = get_logger(__name__)
logger.warning(
    "`diffusers.utils.testing_utils` is deprecated and will be removed in a future version. "
    "Determinism and device backend utilities have been moved to `diffusers.utils.torch_utils`."
)
_required_peft_version = is_peft_available() and version.parse(
    version.parse(importlib.metadata.version("peft")).base_version
) > version.parse("0.5")
_required_transformers_version = is_transformers_available() and version.parse(
    version.parse(importlib.metadata.version("transformers")).base_version
) > version.parse("4.33")

USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
BIG_GPU_MEMORY = int(os.getenv("BIG_GPU_MEMORY", 40))

if is_torch_available():
    import torch

    # Set a backend environment variable for any extra module import required for a custom accelerator
    if "DIFFUSERS_TEST_BACKEND" in os.environ:
        backend = os.environ["DIFFUSERS_TEST_BACKEND"]
        try:
            _ = importlib.import_module(backend)
        except ModuleNotFoundError as e:
            raise ModuleNotFoundError(
                f"Failed to import `DIFFUSERS_TEST_BACKEND` '{backend}'! This should be the name of an installed "
                f"module to enable a specified backend:\n{e}"
            ) from e

    if "DIFFUSERS_TEST_DEVICE" in os.environ:
        torch_device = os.environ["DIFFUSERS_TEST_DEVICE"]
        try:
            # try creating device to see if provided device is valid
            _ = torch.device(torch_device)
        except RuntimeError as e:
            raise RuntimeError(
                f"Unknown testing device specified by environment variable `DIFFUSERS_TEST_DEVICE`: {torch_device}"
            ) from e
        logger.info(f"torch_device overridden to {torch_device}")
    else:
        if torch.cuda.is_available():
            torch_device = "cuda"
        elif torch.xpu.is_available():
            torch_device = "xpu"
        else:
            torch_device = "cpu"
        is_torch_higher_equal_than_1_12 = version.parse(
            version.parse(torch.__version__).base_version
        ) >= version.parse("1.12")

        if is_torch_higher_equal_than_1_12:
            # Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details
            mps_backend_registered = hasattr(torch.backends, "mps")
            torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device

    from .torch_utils import get_torch_cuda_device_capability


def torch_all_close(a, b, *args, **kwargs):
    if not is_torch_available():
        raise ValueError("PyTorch needs to be installed to use this function.")
    if not torch.allclose(a, b, *args, **kwargs):
        raise AssertionError(f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}.")
    return True


def numpy_cosine_similarity_distance(a, b):
    similarity = np.dot(a, b) / (norm(a) * norm(b))
    distance = 1.0 - similarity.mean()

    return distance
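

# A minimal usage sketch (illustrative, not part of the original module): the distance is
# 0.0 for identical flattened outputs and grows as they diverge, which is why tests compare
# it against a small threshold rather than requiring exact equality.
#
#     expected = np.array([0.1, 0.2, 0.3])
#     actual = np.array([0.1, 0.2, 0.31])
#     assert numpy_cosine_similarity_distance(expected, actual) < 1e-3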


def check_if_dicts_are_equal(dict1, dict2):
    dict1, dict2 = dict1.copy(), dict2.copy()

    for key, value in dict1.items():
        if isinstance(value, set):
            dict1[key] = sorted(value)
    for key, value in dict2.items():
        if isinstance(value, set):
            dict2[key] = sorted(value)

    for key in dict1:
        if key not in dict2:
            return False
        if dict1[key] != dict2[key]:
            return False

    for key in dict2:
        if key not in dict1:
            return False

    return True
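

# Illustrative check (added for clarity; hypothetical values): sets are normalized to
# sorted lists before comparison, and a key present in only one dict makes the result False.
#
#     assert check_if_dicts_are_equal({"a": {1, 2}}, {"a": {2, 1}})
#     assert not check_if_dicts_are_equal({"a": 1}, {"a": 1, "b": 2})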


def print_tensor_test(
    tensor,
    limit_to_slices=None,
    max_torch_print=None,
    filename="test_corrections.txt",
    expected_tensor_name="expected_slice",
):
    if max_torch_print:
        torch.set_printoptions(threshold=10_000)

    test_name = os.environ.get("PYTEST_CURRENT_TEST")
    if not torch.is_tensor(tensor):
        tensor = torch.from_numpy(tensor)
    if limit_to_slices:
        tensor = tensor[0, -3:, -3:, -1]

    tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "")
    # format is usually:
    # expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161])
    output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array")
    test_file, test_class, test_fn = test_name.split("::")
    test_fn = test_fn.split()[0]
    with open(filename, "a") as f:
        print("::".join([test_file, test_class, test_fn, output_str]), file=f)


def get_tests_dir(append_path=None):
    """
    Args:
        append_path: optional path to append to the tests dir path
    Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally, `append_path`
        is joined after the `tests` dir if the former is provided.
    """
    # this function caller's __file__
    caller__file__ = inspect.stack()[1][1]
    tests_dir = os.path.abspath(os.path.dirname(caller__file__))

    while not tests_dir.endswith("tests"):
        tests_dir = os.path.dirname(tests_dir)

    if append_path:
        return Path(tests_dir, append_path).as_posix()
    else:
        return tests_dir


# Taken from the following PR:
# https://github.com/huggingface/accelerate/pull/1964
def str_to_bool(value) -> int:
    """
    Converts a string representation of truth to `True` (1) or `False` (0). True values are `y`, `yes`, `t`, `true`,
    `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`.
    """
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return 1
    elif value in ("n", "no", "f", "false", "off", "0"):
        return 0
    else:
        raise ValueError(f"invalid truth value {value}")


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = str_to_bool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)
_run_compile_tests = parse_flag_from_env("RUN_COMPILE", default=False)


def floats_tensor(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.random() * scale)

    return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous()
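

# Usage sketch (illustrative): deterministic dummy inputs for tests can be built by passing
# an explicitly seeded `random.Random` instance instead of the module-level `global_rng`.
#
#     sample = floats_tensor((1, 3, 32, 32), scale=1.0, rng=random.Random(0))
#     assert sample.shape == (1, 3, 32, 32)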


def slow(test_case):
    """
    Decorator marking a test as slow.

    Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.

    """
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
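

# Typical usage (illustrative): decorate expensive tests and opt in from the shell, e.g.
# `RUN_SLOW=1 pytest tests/ -k full_pipeline`.
#
#     @slow
#     def test_full_pipeline_inference(self):
#         ...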


def nightly(test_case):
    """
    Decorator marking a test that runs nightly in the diffusers CI.

    Nightly tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them.

    """
    return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)


def is_torch_compile(test_case):
    """
    Decorator marking a test that runs compile tests in the diffusers CI.

    Compile tests are skipped by default. Set the RUN_COMPILE environment variable to a truthy value to run them.

    """
    return unittest.skipUnless(_run_compile_tests, "test is torch compile")(test_case)


def require_torch(test_case):
    """
    Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed.
    """
    return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)


def require_torch_2(test_case):
    """
    Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed.
    """
    return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")(
        test_case
    )


def require_torch_version_greater_equal(torch_version):
    """Decorator marking a test that requires torch with a specific version or greater."""

    def decorator(test_case):
        correct_torch_version = is_torch_available() and is_torch_version(">=", torch_version)
        return unittest.skipUnless(
            correct_torch_version, f"test requires torch with the version greater than or equal to {torch_version}"
        )(test_case)

    return decorator
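

# Usage sketch (illustrative): version-gating decorators here are factories, so they are
# called with the version first and return the actual decorator.
#
#     @require_torch_version_greater_equal("2.1.0")
#     def test_needs_recent_torch(self):
#         ...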


def require_torch_version_greater(torch_version):
    """Decorator marking a test that requires a torch version strictly greater than the given one."""

    def decorator(test_case):
        correct_torch_version = is_torch_available() and is_torch_version(">", torch_version)
        return unittest.skipUnless(
            correct_torch_version, f"test requires torch with the version greater than {torch_version}"
        )(test_case)

    return decorator


def require_torch_gpu(test_case):
    """Decorator marking a test that requires CUDA and PyTorch."""
    return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")(
        test_case
    )


def require_torch_cuda_compatibility(expected_compute_capability):
    def decorator(test_case):
        if not torch.cuda.is_available():
            return unittest.skip("test requires PyTorch CUDA")(test_case)
        current_compute_capability = get_torch_cuda_device_capability()
        return unittest.skipUnless(
            float(current_compute_capability) == float(expected_compute_capability),
            "Test not supported for this compute capability.",
        )(test_case)

    return decorator


# These decorators are for accelerator-specific behaviours that are not GPU-specific
def require_torch_accelerator(test_case):
    """Decorator marking a test that requires an accelerator backend and PyTorch."""
    return unittest.skipUnless(is_torch_available() and torch_device != "cpu", "test requires accelerator+PyTorch")(
        test_case
    )


def require_torch_multi_gpu(test_case):
    """
    Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
    multiple GPUs. To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests
    -k "multi_gpu"
    """
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_torch_multi_accelerator(test_case):
    """
    Decorator marking a test that requires a multi-accelerator setup (in PyTorch). These tests are skipped on a machine
    without multiple hardware accelerators.
    """
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    return unittest.skipUnless(
        torch.cuda.device_count() > 1 or torch.xpu.device_count() > 1, "test requires multiple hardware accelerators"
    )(test_case)


def require_torch_accelerator_with_fp16(test_case):
    """Decorator marking a test that requires an accelerator with support for the FP16 data type."""
    return unittest.skipUnless(_is_torch_fp16_available(torch_device), "test requires accelerator with fp16 support")(
        test_case
    )


def require_torch_accelerator_with_fp64(test_case):
    """Decorator marking a test that requires an accelerator with support for the FP64 data type."""
    return unittest.skipUnless(_is_torch_fp64_available(torch_device), "test requires accelerator with fp64 support")(
        test_case
    )


def require_big_gpu_with_torch_cuda(test_case):
    """
    Decorator marking a test that requires a big GPU (at least `BIG_GPU_MEMORY` GB, 40 by default) for execution.
    Some example pipelines: Flux, SD3, Cog, etc.
    """
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    if not torch.cuda.is_available():
        return unittest.skip("test requires PyTorch CUDA")(test_case)

    device_properties = torch.cuda.get_device_properties(0)
    total_memory = device_properties.total_memory / (1024**3)
    return unittest.skipUnless(
        total_memory >= BIG_GPU_MEMORY, f"test requires a GPU with at least {BIG_GPU_MEMORY} GB memory"
    )(test_case)
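

# Usage sketch (illustrative): the memory threshold is read from the `BIG_GPU_MEMORY`
# environment variable (40 GB by default, see above), so CI can tune it per runner.
#
#     @require_big_gpu_with_torch_cuda
#     def test_flux_full_model(self):
#         ...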


def require_big_accelerator(test_case):
    """
    Decorator marking a test that requires a big hardware accelerator (at least `BIG_GPU_MEMORY` GB, 40 by default)
    for execution. Some example pipelines: Flux, SD3, Cog, etc.
    """
    import pytest

    test_case = pytest.mark.big_accelerator(test_case)

    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    if not (torch.cuda.is_available() or torch.xpu.is_available()):
        return unittest.skip("test requires PyTorch CUDA or XPU")(test_case)

    if torch.xpu.is_available():
        device_properties = torch.xpu.get_device_properties(0)
    else:
        device_properties = torch.cuda.get_device_properties(0)

    total_memory = device_properties.total_memory / (1024**3)
    return unittest.skipUnless(
        total_memory >= BIG_GPU_MEMORY,
        f"test requires a hardware accelerator with at least {BIG_GPU_MEMORY} GB memory",
    )(test_case)


def require_torch_accelerator_with_training(test_case):
    """Decorator marking a test that requires an accelerator with support for training."""
    return unittest.skipUnless(
        is_torch_available() and backend_supports_training(torch_device),
        "test requires accelerator with training support",
    )(test_case)


def skip_mps(test_case):
    """Decorator marking a test to skip if torch_device is 'mps'"""
    return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case)


def require_flax(test_case):
    """
    Decorator marking a test that requires JAX & Flax. These tests are skipped when one / both are not installed
    """
    return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)


def require_compel(test_case):
    """
    Decorator marking a test that requires compel: https://github.com/damian0815/compel. These tests are skipped when
    the library is not installed.
    """
    return unittest.skipUnless(is_compel_available(), "test requires compel")(test_case)


def require_onnxruntime(test_case):
    """
    Decorator marking a test that requires onnxruntime. These tests are skipped when onnxruntime isn't installed.
    """
    return unittest.skipUnless(is_onnx_available(), "test requires onnxruntime")(test_case)


def require_note_seq(test_case):
    """
    Decorator marking a test that requires note_seq. These tests are skipped when note_seq isn't installed.
    """
    return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case)


def require_accelerator(test_case):
    """
    Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when no hardware
    accelerator is available.
    """
    return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)


def require_torchsde(test_case):
    """
    Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed.
    """
    return unittest.skipUnless(is_torchsde_available(), "test requires torchsde")(test_case)


def require_peft_backend(test_case):
    """
    Decorator marking a test that requires the PEFT backend, which needs specific versions of PEFT and
    transformers.
    """
    return unittest.skipUnless(USE_PEFT_BACKEND, "test requires PEFT backend")(test_case)


def require_timm(test_case):
    """
    Decorator marking a test that requires timm. These tests are skipped when timm isn't installed.
    """
    return unittest.skipUnless(is_timm_available(), "test requires timm")(test_case)


def require_bitsandbytes(test_case):
    """
    Decorator marking a test that requires bitsandbytes. These tests are skipped when bitsandbytes isn't installed.
    """
    return unittest.skipUnless(is_bitsandbytes_available(), "test requires bitsandbytes")(test_case)


def require_quanto(test_case):
    """
    Decorator marking a test that requires quanto. These tests are skipped when quanto isn't installed.
    """
    return unittest.skipUnless(is_optimum_quanto_available(), "test requires quanto")(test_case)


def require_accelerate(test_case):
    """
    Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
    """
    return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)


def require_peft_version_greater(peft_version):
    """
    Decorator marking a test that requires the PEFT backend with a version greater than the given one, which needs
    specific versions of PEFT and transformers.
    """

    def decorator(test_case):
        correct_peft_version = is_peft_available() and version.parse(
            version.parse(importlib.metadata.version("peft")).base_version
        ) > version.parse(peft_version)
        return unittest.skipUnless(
            correct_peft_version, f"test requires PEFT backend with the version greater than {peft_version}"
        )(test_case)

    return decorator


def require_transformers_version_greater(transformers_version):
    """
    Decorator marking a test that requires a transformers version greater than the given one.
    """

    def decorator(test_case):
        correct_transformers_version = is_transformers_available() and version.parse(
            version.parse(importlib.metadata.version("transformers")).base_version
        ) > version.parse(transformers_version)
        return unittest.skipUnless(
            correct_transformers_version,
            f"test requires transformers with the version greater than {transformers_version}",
        )(test_case)

    return decorator


def require_accelerate_version_greater(accelerate_version):
    def decorator(test_case):
        correct_accelerate_version = is_accelerate_available() and version.parse(
            version.parse(importlib.metadata.version("accelerate")).base_version
        ) > version.parse(accelerate_version)
        return unittest.skipUnless(
            correct_accelerate_version, f"Test requires accelerate with the version greater than {accelerate_version}."
        )(test_case)

    return decorator


def require_bitsandbytes_version_greater(bnb_version):
    def decorator(test_case):
        correct_bnb_version = is_bitsandbytes_available() and version.parse(
            version.parse(importlib.metadata.version("bitsandbytes")).base_version
        ) > version.parse(bnb_version)
        return unittest.skipUnless(
            correct_bnb_version, f"Test requires bitsandbytes with the version greater than {bnb_version}."
        )(test_case)

    return decorator


def require_hf_hub_version_greater(hf_hub_version):
    def decorator(test_case):
        correct_hf_hub_version = version.parse(
            version.parse(importlib.metadata.version("huggingface_hub")).base_version
        ) > version.parse(hf_hub_version)
        return unittest.skipUnless(
            correct_hf_hub_version, f"Test requires huggingface_hub with the version greater than {hf_hub_version}."
        )(test_case)

    return decorator


def require_gguf_version_greater_or_equal(gguf_version):
    def decorator(test_case):
        correct_gguf_version = is_gguf_available() and version.parse(
            version.parse(importlib.metadata.version("gguf")).base_version
        ) >= version.parse(gguf_version)
        return unittest.skipUnless(
            correct_gguf_version, f"Test requires gguf with the version greater than or equal to {gguf_version}."
        )(test_case)

    return decorator


def require_torchao_version_greater_or_equal(torchao_version):
    def decorator(test_case):
        correct_torchao_version = is_torchao_available() and version.parse(
            version.parse(importlib.metadata.version("torchao")).base_version
        ) >= version.parse(torchao_version)
        return unittest.skipUnless(
            correct_torchao_version, f"Test requires torchao with version greater than or equal to {torchao_version}."
        )(test_case)

    return decorator


def require_kernels_version_greater_or_equal(kernels_version):
    def decorator(test_case):
        correct_kernels_version = is_kernels_available() and version.parse(
            version.parse(importlib.metadata.version("kernels")).base_version
        ) >= version.parse(kernels_version)
        return unittest.skipUnless(
            correct_kernels_version, f"Test requires kernels with version greater than or equal to {kernels_version}."
        )(test_case)

    return decorator


def deprecate_after_peft_backend(test_case):
    """
    Decorator marking a test that is skipped once the PEFT backend is in use.
    """
    return unittest.skipUnless(not USE_PEFT_BACKEND, "test skipped in favor of PEFT backend")(test_case)


def get_python_version():
    sys_info = sys.version_info
    major, minor = sys_info.major, sys_info.minor
    return major, minor


def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -> np.ndarray:
    if isinstance(arry, str):
        if local_path is not None:
            # local_path can be passed to correct images of tests
            return Path(local_path, arry.split("/")[-5], arry.split("/")[-2], arry.split("/")[-1]).as_posix()
        elif arry.startswith("http://") or arry.startswith("https://"):
            response = requests.get(arry, timeout=DIFFUSERS_REQUEST_TIMEOUT)
            response.raise_for_status()
            arry = np.load(BytesIO(response.content))
        elif os.path.isfile(arry):
            arry = np.load(arry)
        else:
            raise ValueError(
                f"Incorrect path or url, URLs must start with `http://` or `https://`, and {arry} is not a valid path"
            )
    elif isinstance(arry, np.ndarray):
        pass
    else:
        raise ValueError(
            "Incorrect format used for numpy ndarray. Should be a URL linking to a numpy file, a local path, or an"
            " ndarray."
        )

    return arry


def load_pt(url: str, map_location: Optional[str] = None, weights_only: Optional[bool] = True):
    response = requests.get(url, timeout=DIFFUSERS_REQUEST_TIMEOUT)
    response.raise_for_status()
    arry = torch.load(BytesIO(response.content), map_location=map_location, weights_only=weights_only)
    return arry


def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image:
    """
    Loads `image` to a PIL Image.

    Args:
        image (`str` or `PIL.Image.Image`):
            The image to convert to the PIL Image format.
    Returns:
        `PIL.Image.Image`:
            A PIL Image.
    """
    if isinstance(image, str):
        if image.startswith("http://") or image.startswith("https://"):
            image = PIL.Image.open(requests.get(image, stream=True, timeout=DIFFUSERS_REQUEST_TIMEOUT).raw)
        elif os.path.isfile(image):
            image = PIL.Image.open(image)
        else:
            raise ValueError(
                f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path"
            )
    elif isinstance(image, PIL.Image.Image):
        pass
    else:
        raise ValueError(
            "Incorrect format used for image. Should be a URL linking to an image, a local path, or a PIL image."
        )
    image = PIL.ImageOps.exif_transpose(image)
    image = image.convert("RGB")
    return image


def preprocess_image(image: PIL.Image.Image, batch_size: int):
    w, h = image.size
    w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
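

# Worked example of the value range (illustrative): a uint8 pixel of 255 maps to
# 255 / 255.0 = 1.0 and then to 2.0 * 1.0 - 1.0 = 1.0, while 0 maps to -1.0, so the returned
# (batch_size, 3, h, w) tensor lies in [-1.0, 1.0].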


def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None) -> str:
    if output_gif_path is None:
        output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name

    image[0].save(
        output_gif_path,
        save_all=True,
        append_images=image[1:],
        optimize=False,
        duration=100,
        loop=0,
    )
    return output_gif_path


@contextmanager
def buffered_writer(raw_f):
    f = io.BufferedWriter(raw_f)
    yield f
    f.flush()


def export_to_ply(mesh, output_ply_path: str = None):
    """
    Write a PLY file for a mesh.
    """
    if output_ply_path is None:
        output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name

    coords = mesh.verts.detach().cpu().numpy()
    faces = mesh.faces.cpu().numpy()
    rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)

    with buffered_writer(open(output_ply_path, "wb")) as f:
        f.write(b"ply\n")
        f.write(b"format binary_little_endian 1.0\n")
        f.write(bytes(f"element vertex {len(coords)}\n", "ascii"))
        f.write(b"property float x\n")
        f.write(b"property float y\n")
        f.write(b"property float z\n")
        if rgb is not None:
            f.write(b"property uchar red\n")
            f.write(b"property uchar green\n")
            f.write(b"property uchar blue\n")
        if faces is not None:
            f.write(bytes(f"element face {len(faces)}\n", "ascii"))
            f.write(b"property list uchar int vertex_index\n")
        f.write(b"end_header\n")

        if rgb is not None:
            rgb = (rgb * 255.499).round().astype(int)
            vertices = [
                (*coord, *rgb)
                for coord, rgb in zip(
                    coords.tolist(),
                    rgb.tolist(),
                )
            ]
            format = struct.Struct("<3f3B")
            for item in vertices:
                f.write(format.pack(*item))
        else:
            format = struct.Struct("<3f")
            for vertex in coords.tolist():
                f.write(format.pack(*vertex))

        if faces is not None:
            format = struct.Struct("<B3I")
            for tri in faces.tolist():
                f.write(format.pack(len(tri), *tri))
    return output_ply_path


def export_to_obj(mesh, output_obj_path: str = None):
    if output_obj_path is None:
        output_obj_path = tempfile.NamedTemporaryFile(suffix=".obj").name

    verts = mesh.verts.detach().cpu().numpy()
    faces = mesh.faces.cpu().numpy()

    vertex_colors = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)
    vertices = [
        "{} {} {} {} {} {}".format(*coord, *color) for coord, color in zip(verts.tolist(), vertex_colors.tolist())
    ]

    faces = ["f {} {} {}".format(str(tri[0] + 1), str(tri[1] + 1), str(tri[2] + 1)) for tri in faces.tolist()]

    combined_data = ["v " + vertex for vertex in vertices] + faces

    with open(output_obj_path, "w") as f:
        f.writelines("\n".join(combined_data))


def export_to_video(video_frames: List[np.ndarray], output_video_path: str = None) -> str:
    if is_opencv_available():
        import cv2
    else:
        raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video"))
    if output_video_path is None:
        output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    h, w, c = video_frames[0].shape
    video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=8, frameSize=(w, h))
    for i in range(len(video_frames)):
        img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR)
        video_writer.write(img)
    return output_video_path


def load_hf_numpy(path) -> np.ndarray:
    base_url = "https://huggingface.co/datasets/fusing/diffusers-testing/resolve/main"

    if not path.startswith("http://") and not path.startswith("https://"):
        path = os.path.join(base_url, urllib.parse.quote(path))

    return load_numpy(path)


# --- pytest conf functions --- #

# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}


def pytest_addoption_shared(parser):
    """
    This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.

    It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
    option.

    """
    option = "--make-reports"
    if option not in pytest_opt_registered:
        parser.addoption(
            option,
            action="store",
            default=False,
            help="generate report files. The value of this option is used as a prefix to report names",
        )
        pytest_opt_registered[option] = 1
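

# A minimal `conftest.py` wrapper (illustrative sketch of the call site the docstring
# describes):
#
#     from diffusers.utils.testing_utils import pytest_addoption_shared
#
#     def pytest_addoption(parser):
#         pytest_addoption_shared(parser)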


def pytest_terminal_summary_main(tr, id):
    """
    Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
    directory. The report files are prefixed with the test suite name.

    This function emulates --duration and -rA pytest arguments.

    This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
    there.

    Args:
    - tr: `terminalreporter` passed from `conftest.py`
    - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
      needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.

    NB: this functions taps into a private _pytest API and while unlikely, it could break should
    pytest do internal changes - also it calls default internal methods of terminalreporter which
    can be hijacked by various `pytest-` plugins and interfere.

    """
    from _pytest.config import create_terminal_writer

    if not len(id):
        id = "tests"

    config = tr.config
    orig_writer = config.get_terminal_writer()
    orig_tbstyle = config.option.tbstyle
    orig_reportchars = tr.reportchars

    dir = "reports"
    Path(dir).mkdir(parents=True, exist_ok=True)
    report_files = {
        k: f"{dir}/{id}_{k}.txt"
        for k in [
            "durations",
            "errors",
            "failures_long",
            "failures_short",
            "failures_line",
            "passes",
            "stats",
            "summary_short",
            "warnings",
        ]
    }

    # custom durations report
    # note: there is no need to call pytest --durations=XX to get this separate report
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
    dlist = []
    for replist in tr.stats.values():
        for rep in replist:
            if hasattr(rep, "duration"):
                dlist.append(rep)
    if dlist:
        dlist.sort(key=lambda x: x.duration, reverse=True)
        with open(report_files["durations"], "w") as f:
            durations_min = 0.05  # sec
            f.write("slowest durations\n")
            for i, rep in enumerate(dlist):
                if rep.duration < durations_min:
                    f.write(f"{len(dlist) - i} durations < {durations_min} secs were omitted")
                    break
                f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")

    def summary_failures_short(tr):
        # expecting that the reports were --tb=long (default) so we chop them off here to the last frame
        reports = tr.getreports("failed")
        if not reports:
            return
        tr.write_sep("=", "FAILURES SHORT STACK")
        for rep in reports:
            msg = tr._getfailureheadline(rep)
            tr.write_sep("_", msg, red=True, bold=True)
            # chop off the optional leading extra frames, leaving only the last one
            longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
            tr._tw.line(longrepr)
            # note: not printing out any rep.sections to keep the report short

    # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
    # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
    # pytest-instafail does that)

    # report failures with line/short/long styles
    config.option.tbstyle = "auto"  # full tb
    with open(report_files["failures_long"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    # config.option.tbstyle = "short" # short tb
    with open(report_files["failures_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        summary_failures_short(tr)

    config.option.tbstyle = "line"  # one line per error
    with open(report_files["failures_line"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    with open(report_files["errors"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_errors()

    with open(report_files["warnings"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_warnings()  # normal warnings
        tr.summary_warnings()  # final warnings

    tr.reportchars = "wPpsxXEf"  # emulate -rA (used in summary_passes() and short_test_summary())
    with open(report_files["passes"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_passes()

    with open(report_files["summary_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.short_test_summary()

    with open(report_files["stats"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_stats()

    # restore:
    tr._tw = orig_writer
    tr.reportchars = orig_reportchars
    config.option.tbstyle = orig_tbstyle


# Adapted from https://github.com/huggingface/transformers/blob/000e52aec8850d3fe2f360adc6fd256e5b47fe4c/src/transformers/testing_utils.py#L1905
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
    """
    To decorate flaky tests (methods or entire classes). They will be retried on failures.

    Args:
        max_attempts (`int`, *optional*, defaults to 5):
            The maximum number of attempts to retry the flaky test.
        wait_before_retry (`float`, *optional*):
            If provided, will wait that number of seconds before retrying the test.
        description (`str`, *optional*):
            A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
            etc.)
    """

    def decorator(obj):
        # If decorating a class, wrap each test method on it
        if inspect.isclass(obj):
            for attr_name, attr_value in list(obj.__dict__.items()):
                if callable(attr_value) and attr_name.startswith("test"):
                    # recursively decorate the method
                    setattr(obj, attr_name, decorator(attr_value))
            return obj

        # Otherwise we're decorating a single test function / method
        @functools.wraps(obj)
        def wrapper(*args, **kwargs):
            retry_count = 1
            while retry_count < max_attempts:
                try:
                    return obj(*args, **kwargs)
                except Exception as err:
                    msg = (
                        f"[FLAKY] {description or obj.__name__!r} "
                        f"failed on attempt {retry_count}/{max_attempts}: {err}"
                    )
                    print(msg, file=sys.stderr)
                    if wait_before_retry is not None:
                        time.sleep(wait_before_retry)
                    retry_count += 1

            return obj(*args, **kwargs)

        return wrapper

    return decorator
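

# Usage sketch (illustrative): `is_flaky` can wrap a single test or a whole test class; when
# applied to a class, every method whose name starts with "test" is retried.
#
#     @is_flaky(max_attempts=3, wait_before_retry=1.0, description="nondeterministic sampler output")
#     def test_sometimes_flaky(self):
#         ...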


# Taken from: https://github.com/huggingface/transformers/blob/3658488ff77ff8d45101293e749263acf437f4d5/src/transformers/testing_utils.py#L1787
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
    """
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issue.

    Args:
        test_case (`unittest.TestCase`):
            The test that will run `target_func`.
        target_func (`Callable`):
            The function implementing the actual testing logic.
        inputs (`dict`, *optional*, defaults to `None`):
            The inputs that will be passed to `target_func` through an (input) queue.
        timeout (`int`, *optional*, defaults to `None`):
            The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
            variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
    """
    if timeout is None:
        timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))

    start_method = "spawn"
    ctx = multiprocessing.get_context(start_method)

    input_queue = ctx.Queue(1)
    output_queue = ctx.JoinableQueue(1)

    # We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
    input_queue.put(inputs, timeout=timeout)

    process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
    process.start()
    # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
    try:
        results = output_queue.get(timeout=timeout)
        output_queue.task_done()
    except Exception as e:
        process.terminate()
        test_case.fail(e)
    process.join(timeout=timeout)

    if results["error"] is not None:
        test_case.fail(f"{results['error']}")
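

# Contract sketch for `target_func` (illustrative; `_test_target` is a hypothetical name):
# it receives the input queue, output queue and timeout, and must put a dict with an
# "error" key (None on success) on the output queue.
#
#     def _test_target(in_queue, out_queue, timeout):
#         error = None
#         try:
#             inputs = in_queue.get(timeout=timeout)
#             ...  # run the actual assertions against `inputs`
#         except Exception as e:
#             error = str(e)
#         out_queue.put({"error": error}, timeout=timeout)
#         out_queue.join()  # wait until the parent has consumed the result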


class CaptureLogger:
    """
    Context manager to capture `logging` streams.

    Args:
        logger: `logging` logger object
    Returns:
        The captured output is available via `self.out`
    Example:
    ```python
    >>> from diffusers import logging
    >>> from diffusers.testing_utils import CaptureLogger

    >>> msg = "Testing 1, 2, 3"
    >>> logging.set_verbosity_info()
    >>> logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py")
    >>> with CaptureLogger(logger) as cl:
    ...     logger.info(msg)
    >>> assert cl.out == msg + "\n"
    ```
    """

    def __init__(self, logger):
        self.logger = logger
        self.io = StringIO()
        self.sh = logging.StreamHandler(self.io)
        self.out = ""

    def __enter__(self):
        self.logger.addHandler(self.sh)
        return self

    def __exit__(self, *exc):
        self.logger.removeHandler(self.sh)
        self.out = self.io.getvalue()

    def __repr__(self):
        return f"captured: {self.out}\n"


def enable_full_determinism():
    """
    Helper function for reproducible behavior during distributed training. See
    - https://pytorch.org/docs/stable/notes/randomness.html for pytorch
    """
    from .torch_utils import enable_full_determinism as _enable_full_determinism

    logger.warning(
        "enable_full_determinism has been moved to diffusers.utils.torch_utils. "
        "Importing from diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _enable_full_determinism()


def disable_full_determinism():
    from .torch_utils import disable_full_determinism as _disable_full_determinism

    logger.warning(
        "disable_full_determinism has been moved to diffusers.utils.torch_utils. "
        "Importing from diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _disable_full_determinism()


# Utils for custom and alternative accelerator devices
def _is_torch_fp16_available(device):
    if not is_torch_available():
        return False

    import torch

    device = torch.device(device)

    try:
        x = torch.zeros((2, 2), dtype=torch.float16).to(device)
        _ = torch.mul(x, x)
        return True

    except Exception as e:
        if device.type == "cuda":
            raise ValueError(
                f"You have passed a device of type 'cuda' which should work with 'fp16', but 'cuda' does not seem to be correctly installed on your machine: {e}"
            )

        return False


def _is_torch_fp64_available(device):
    if not is_torch_available():
        return False

    import torch

    device = torch.device(device)

    try:
        x = torch.zeros((2, 2), dtype=torch.float64).to(device)
        _ = torch.mul(x, x)
        return True

    except Exception as e:
        if device.type == "cuda":
            raise ValueError(
                f"You have passed a device of type 'cuda' which should work with 'fp64', but 'cuda' does not seem to be correctly installed on your machine: {e}"
            )

        return False


# Guard these lookups for when Torch is not used - alternative accelerator support is for PyTorch
if is_torch_available():
    # Behaviour flags
    BACKEND_SUPPORTS_TRAINING = {"cuda": True, "xpu": True, "cpu": True, "mps": False, "default": True}

    # Function definitions
    BACKEND_EMPTY_CACHE = {
        "cuda": torch.cuda.empty_cache,
        "xpu": torch.xpu.empty_cache,
        "cpu": None,
        "mps": torch.mps.empty_cache,
        "default": None,
    }
    BACKEND_DEVICE_COUNT = {
        "cuda": torch.cuda.device_count,
        "xpu": torch.xpu.device_count,
        "cpu": lambda: 0,
        "mps": lambda: 0,
        "default": 0,
    }
    BACKEND_MANUAL_SEED = {
        "cuda": torch.cuda.manual_seed,
        "xpu": torch.xpu.manual_seed,
        "cpu": torch.manual_seed,
        "mps": torch.mps.manual_seed,
        "default": torch.manual_seed,
    }
    BACKEND_RESET_PEAK_MEMORY_STATS = {
        "cuda": torch.cuda.reset_peak_memory_stats,
        "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None),
        "cpu": None,
        "mps": None,
        "default": None,
    }
    BACKEND_RESET_MAX_MEMORY_ALLOCATED = {
        "cuda": torch.cuda.reset_max_memory_allocated,
        "xpu": getattr(torch.xpu, "reset_peak_memory_stats", None),
        "cpu": None,
        "mps": None,
        "default": None,
    }
    BACKEND_MAX_MEMORY_ALLOCATED = {
        "cuda": torch.cuda.max_memory_allocated,
        "xpu": getattr(torch.xpu, "max_memory_allocated", None),
        "cpu": 0,
        "mps": 0,
        "default": 0,
    }
    BACKEND_SYNCHRONIZE = {
        "cuda": torch.cuda.synchronize,
        "xpu": getattr(torch.xpu, "synchronize", None),
        "cpu": None,
        "mps": None,
        "default": None,
    }


# This dispatches a defined function according to the accelerator from the function definitions.
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
    fn = dispatch_table.get(device, dispatch_table["default"])

    # Some entries are plain values (e.g. `None` or `0`) rather than callables; return them
    # as-is so the caller can guard against `None` at user level.
    if not callable(fn):
        return fn

    return fn(*args, **kwargs)
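

# Usage sketch (illustrative): dispatch through one of the `BACKEND_*` tables defined above,
# e.g. seeding whichever backend the current `torch_device` resolves to:
#
#     _device_agnostic_dispatch(torch_device, BACKEND_MANUAL_SEED, 0)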


# These are callables which automatically dispatch the function specific to the accelerator
def backend_manual_seed(device: str, seed: int):
    from .torch_utils import backend_manual_seed as _backend_manual_seed

    logger.warning(
        "backend_manual_seed has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_manual_seed(device, seed)


def backend_synchronize(device: str):
    from .torch_utils import backend_synchronize as _backend_synchronize

    logger.warning(
        "backend_synchronize has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_synchronize(device)


def backend_empty_cache(device: str):
    from .torch_utils import backend_empty_cache as _backend_empty_cache

    logger.warning(
        "backend_empty_cache has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_empty_cache(device)


def backend_device_count(device: str):
    from .torch_utils import backend_device_count as _backend_device_count

    logger.warning(
        "backend_device_count has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_device_count(device)


def backend_reset_peak_memory_stats(device: str):
    from .torch_utils import backend_reset_peak_memory_stats as _backend_reset_peak_memory_stats

    logger.warning(
        "backend_reset_peak_memory_stats has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_reset_peak_memory_stats(device)


def backend_reset_max_memory_allocated(device: str):
    from .torch_utils import backend_reset_max_memory_allocated as _backend_reset_max_memory_allocated

    logger.warning(
        "backend_reset_max_memory_allocated has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_reset_max_memory_allocated(device)


def backend_max_memory_allocated(device: str):
    from .torch_utils import backend_max_memory_allocated as _backend_max_memory_allocated

    logger.warning(
        "backend_max_memory_allocated has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_max_memory_allocated(device)


# These are callables which return boolean behaviour flags and can be used to specify some
# device agnostic alternative where the feature is unsupported.
def backend_supports_training(device: str):
    from .torch_utils import backend_supports_training as _backend_supports_training

    logger.warning(
        "backend_supports_training has been moved to diffusers.utils.torch_utils. "
        "diffusers.utils.testing_utils is deprecated and will be removed in a future version."
    )
    return _backend_supports_training(device)


# Guard for when Torch is not available
if is_torch_available():
    # Update device function dict mapping
    def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
        try:
            # Try to import the function directly
            spec_fn = getattr(device_spec_module, attribute_name)
            device_fn_dict[torch_device] = spec_fn
        except AttributeError as e:
            # If the function doesn't exist, and there is no default, throw an error
            if "default" not in device_fn_dict:
                raise AttributeError(
                    f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
                ) from e

    if "DIFFUSERS_TEST_DEVICE_SPEC" in os.environ:
        device_spec_path = os.environ["DIFFUSERS_TEST_DEVICE_SPEC"]
        if not Path(device_spec_path).is_file():
            raise ValueError(f"Specified path to device specification file is not found. Received {device_spec_path}")

        try:
            import_name = device_spec_path[: device_spec_path.index(".py")]
        except ValueError as e:
            raise ValueError(f"Provided device spec file is not a Python file! Received {device_spec_path}") from e

        device_spec_module = importlib.import_module(import_name)

        try:
            device_name = device_spec_module.DEVICE_NAME
        except AttributeError:
            raise AttributeError("Device spec file did not contain `DEVICE_NAME`")

        if "DIFFUSERS_TEST_DEVICE" in os.environ and torch_device != device_name:
            msg = f"Mismatch between environment variable `DIFFUSERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
            msg += "Either unset `DIFFUSERS_TEST_DEVICE` or ensure it matches device spec name."
            raise ValueError(msg)

        torch_device = device_name

        # Add one entry here for each `BACKEND_*` dictionary.
        update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
        update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
        update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
        update_mapping_from_spec(BACKEND_SUPPORTS_TRAINING, "SUPPORTS_TRAINING")
        update_mapping_from_spec(BACKEND_RESET_PEAK_MEMORY_STATS, "RESET_PEAK_MEMORY_STATS_FN")
        update_mapping_from_spec(BACKEND_RESET_MAX_MEMORY_ALLOCATED, "RESET_MAX_MEMORY_ALLOCATED_FN")
        update_mapping_from_spec(BACKEND_MAX_MEMORY_ALLOCATED, "MAX_MEMORY_ALLOCATED_FN")
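
        # For illustration, a hypothetical spec file (say `npu_spec.py`, pointed to by
        # `DIFFUSERS_TEST_DEVICE_SPEC`) would define the attributes queried by
        # `update_mapping_from_spec` above; the concrete callables here are only a sketch:
        #
        #     import torch
        #
        #     DEVICE_NAME = "npu"
        #     MANUAL_SEED_FN = torch.npu.manual_seed
        #     EMPTY_CACHE_FN = torch.npu.empty_cache
        #     DEVICE_COUNT_FN = torch.npu.device_count
        #     SUPPORTS_TRAINING = True
        #     RESET_PEAK_MEMORY_STATS_FN = torch.npu.reset_peak_memory_stats
        #     RESET_MAX_MEMORY_ALLOCATED_FN = torch.npu.reset_max_memory_allocated
        #     MAX_MEMORY_ALLOCATED_FN = torch.npu.max_memory_allocated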


# Modified from https://github.com/huggingface/transformers/blob/cdfb018d0300fef3b07d9220f3efe9c2a9974662/src/transformers/testing_utils.py#L3090

# Type definition of key used in `Expectations` class.
DeviceProperties = Tuple[Union[str, None], Union[int, None]]


@functools.lru_cache
def get_device_properties() -> DeviceProperties:
    """
    Get environment device properties.
    """
    if IS_CUDA_SYSTEM or IS_ROCM_SYSTEM:
        import torch

        major, _ = torch.cuda.get_device_capability()
        if IS_ROCM_SYSTEM:
            return ("rocm", major)
        else:
            return ("cuda", major)
    elif IS_XPU_SYSTEM:
        import torch

        # To get more info of the architecture meaning and bit allocation, refer to https://github.com/intel/llvm/blob/sycl/sycl/include/sycl/ext/oneapi/experimental/device_architecture.def
        arch = torch.xpu.get_device_capability()["architecture"]
        gen_mask = 0x000000FF00000000
        gen = (arch & gen_mask) >> 32
        return ("xpu", gen)
    else:
        return (torch_device, None)
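
# For illustration only (actual values depend on the machine running the tests): an A100 host
# would yield ("cuda", 8), a gfx90a ROCm host ("rocm", 9), and a CPU-only environment falls
# back to (torch_device, None), i.e. ("cpu", None).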


if TYPE_CHECKING:
    DevicePropertiesUserDict = UserDict[DeviceProperties, Any]
else:
    DevicePropertiesUserDict = UserDict

if is_torch_available():
    from diffusers.hooks._common import _GO_LC_SUPPORTED_PYTORCH_LAYERS
    from diffusers.hooks.group_offloading import (
        _GROUP_ID_LAZY_LEAF,
        _compute_group_hash,
        _find_parent_module_in_module_dict,
        _gather_buffers_with_no_group_offloading_parent,
        _gather_parameters_with_no_group_offloading_parent,
    )

    def _get_expected_safetensors_files(
        module: torch.nn.Module,
        offload_to_disk_path: str,
        offload_type: str,
        num_blocks_per_group: Optional[int] = None,
    ) -> Set[str]:
        expected_files = set()

        def get_hashed_filename(group_id: str) -> str:
            short_hash = _compute_group_hash(group_id)
            return os.path.join(offload_to_disk_path, f"group_{short_hash}.safetensors")

        if offload_type == "block_level":
            if num_blocks_per_group is None:
                raise ValueError("num_blocks_per_group must be provided for 'block_level' offloading.")

            # Handle groups of ModuleList and Sequential blocks
            unmatched_modules = []
            for name, submodule in module.named_children():
                if not isinstance(submodule, (torch.nn.ModuleList, torch.nn.Sequential)):
                    unmatched_modules.append(module)
                    continue

                for i in range(0, len(submodule), num_blocks_per_group):
                    current_modules = submodule[i : i + num_blocks_per_group]
                    if not current_modules:
                        continue
                    group_id = f"{name}_{i}_{i + len(current_modules) - 1}"
                    expected_files.add(get_hashed_filename(group_id))

            # Handle the group for unmatched top-level modules and parameters
            for module in unmatched_modules:
                expected_files.add(get_hashed_filename(f"{module.__class__.__name__}_unmatched_group"))

        elif offload_type == "leaf_level":
            # Handle leaf-level module groups
            for name, submodule in module.named_modules():
                if isinstance(submodule, _GO_LC_SUPPORTED_PYTORCH_LAYERS):
                    # These groups will always have parameters, so a file is expected
                    expected_files.add(get_hashed_filename(name))

            # Handle groups for non-leaf parameters/buffers
            modules_with_group_offloading = {
                name for name, sm in module.named_modules() if isinstance(sm, _GO_LC_SUPPORTED_PYTORCH_LAYERS)
            }
            parameters = _gather_parameters_with_no_group_offloading_parent(module, modules_with_group_offloading)
            buffers = _gather_buffers_with_no_group_offloading_parent(module, modules_with_group_offloading)

            all_orphans = parameters + buffers
            if all_orphans:
                parent_to_tensors = {}
                module_dict = dict(module.named_modules())
                for tensor_name, _ in all_orphans:
                    parent_name = _find_parent_module_in_module_dict(tensor_name, module_dict)
                    if parent_name not in parent_to_tensors:
                        parent_to_tensors[parent_name] = []
                    parent_to_tensors[parent_name].append(tensor_name)

                for parent_name in parent_to_tensors:
                    # A file is expected for each parent that gathers orphaned tensors
                    expected_files.add(get_hashed_filename(parent_name))
            expected_files.add(get_hashed_filename(_GROUP_ID_LAZY_LEAF))

        else:
            raise ValueError(f"Unsupported offload_type: {offload_type}")

        return expected_files

    def _check_safetensors_serialization(
        module: torch.nn.Module,
        offload_to_disk_path: str,
        offload_type: str,
        num_blocks_per_group: Optional[int] = None,
    ) -> Tuple[bool, Optional[Set[str]], Optional[Set[str]]]:
        if not os.path.isdir(offload_to_disk_path):
            return False, None, None

        expected_files = _get_expected_safetensors_files(
            module, offload_to_disk_path, offload_type, num_blocks_per_group
        )
        actual_files = set(glob.glob(os.path.join(offload_to_disk_path, "*.safetensors")))
        missing_files = expected_files - actual_files
        extra_files = actual_files - expected_files

        is_correct = not missing_files and not extra_files
        return is_correct, extra_files, missing_files
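
    # A rough usage sketch (names are hypothetical; assumes disk group offloading was enabled on
    # `model` beforehand, e.g. via `model.enable_group_offload(..., offload_to_disk_path=tmpdir)`):
    #
    #     is_correct, extra_files, missing_files = _check_safetensors_serialization(
    #         module=model,
    #         offload_to_disk_path=tmpdir,
    #         offload_type="block_level",
    #         num_blocks_per_group=2,
    #     )
    #     assert is_correct, f"missing: {missing_files}, unexpected: {extra_files}"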


class Expectations(DevicePropertiesUserDict):
    def get_expectation(self) -> Any:
        """
        Find best matching expectation based on environment device properties.
        """
        return self.find_expectation(get_device_properties())

    @staticmethod
    def is_default(key: DeviceProperties) -> bool:
        return all(p is None for p in key)

    @staticmethod
    def score(key: DeviceProperties, other: DeviceProperties) -> int:
        """
        Returns a score indicating how similar two instances of the `DeviceProperties` tuple are. Points are
        accumulated as bit flags but documented as plain integers. Rules are as follows:
            * Matching `type` gives 8 points.
            * Semi-matching `type`, for example cuda and rocm, gives 4 points.
            * Matching `major` (compute capability major version) gives 2 points.
            * A default expectation (if present) gives 1 point.
        """
        (device_type, major) = key
        (other_device_type, other_major) = other

        score = 0b0
        if device_type == other_device_type:
            score |= 0b1000
        elif device_type in ["cuda", "rocm"] and other_device_type in ["cuda", "rocm"]:
            score |= 0b100

        if major == other_major and other_major is not None:
            score |= 0b10

        if Expectations.is_default(other):
            score |= 0b1

        return int(score)

    def find_expectation(self, key: DeviceProperties = (None, None)) -> Any:
        """
        Find best matching expectation based on provided device properties.
        """
        (result_key, result) = max(self.data.items(), key=lambda x: Expectations.score(key, x[0]))

        if Expectations.score(key, result_key) == 0:
            raise ValueError(f"No matching expectation found for {key}")

        return result

    def __repr__(self):
        return f"{self.data}"