import functools
import importlib
import importlib.metadata
import inspect
import io
import logging
import multiprocessing
import os
import random
import re
import struct
import sys
import tempfile
import time
import unittest
import urllib.parse
from contextlib import contextmanager
from io import BytesIO, StringIO
from pathlib import Path
from typing import Callable, Dict, List, Optional, Union

import numpy as np
import PIL.Image
import PIL.ImageOps
import requests
from numpy.linalg import norm
from packaging import version

from .import_utils import (
    BACKENDS_MAPPING,
    is_accelerate_available,
    is_bitsandbytes_available,
    is_compel_available,
    is_flax_available,
    is_gguf_available,
    is_note_seq_available,
    is_onnx_available,
    is_opencv_available,
    is_peft_available,
    is_timm_available,
    is_torch_available,
    is_torch_version,
    is_torchao_available,
    is_torchsde_available,
    is_transformers_available,
)
from .logging import get_logger


global_rng = random.Random()

logger = get_logger(__name__)

_required_peft_version = is_peft_available() and version.parse(
    version.parse(importlib.metadata.version("peft")).base_version
) > version.parse("0.5")
_required_transformers_version = is_transformers_available() and version.parse(
    version.parse(importlib.metadata.version("transformers")).base_version
) > version.parse("4.33")

USE_PEFT_BACKEND = _required_peft_version and _required_transformers_version
BIG_GPU_MEMORY = int(os.getenv("BIG_GPU_MEMORY", 40))

if is_torch_available():
    import torch

    # Set a backend environment variable for any extra module import required for a custom accelerator
    if "DIFFUSERS_TEST_BACKEND" in os.environ:
        backend = os.environ["DIFFUSERS_TEST_BACKEND"]
        try:
            _ = importlib.import_module(backend)
        except ModuleNotFoundError as e:
            raise ModuleNotFoundError(
                f"Failed to import `DIFFUSERS_TEST_BACKEND` '{backend}'! This should be the name of an installed module \
                    to enable a specified backend.):\n{e}"
            ) from e

    if "DIFFUSERS_TEST_DEVICE" in os.environ:
        torch_device = os.environ["DIFFUSERS_TEST_DEVICE"]
        try:
            # try creating device to see if provided device is valid
            _ = torch.device(torch_device)
        except RuntimeError as e:
            raise RuntimeError(
                f"Unknown testing device specified by environment variable `DIFFUSERS_TEST_DEVICE`: {torch_device}"
            ) from e
        logger.info(f"torch_device overrode to {torch_device}")
    else:
        torch_device = "cuda" if torch.cuda.is_available() else "cpu"
        is_torch_higher_equal_than_1_12 = version.parse(
            version.parse(torch.__version__).base_version
        ) >= version.parse("1.12")

        if is_torch_higher_equal_than_1_12:
            # Some builds of torch 1.12 don't have the mps backend registered. See #892 for more details
            mps_backend_registered = hasattr(torch.backends, "mps")
            torch_device = "mps" if (mps_backend_registered and torch.backends.mps.is_available()) else torch_device


def torch_all_close(a, b, *args, **kwargs):
    if not is_torch_available():
        raise ValueError("PyTorch needs to be installed to use this function.")
    if not torch.allclose(a, b, *args, **kwargs):
        assert False, f"Max diff is absolute {(a - b).abs().max()}. Diff tensor is {(a - b).abs()}."
    return True
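
# Usage sketch (tensors and tolerance are illustrative): raises with the max
# diff on mismatch, returns True otherwise:
#
#   torch_all_close(output[0, :3, :3], expected_slice, atol=1e-4)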


def numpy_cosine_similarity_distance(a, b):
    similarity = np.dot(a, b) / (norm(a) * norm(b))
    distance = 1.0 - similarity.mean()

    return distance
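
# Usage sketch (threshold is illustrative): the distance is 0.0 for identical
# vectors and approaches 2.0 for opposite ones, so tests assert it stays small:
#
#   max_diff = numpy_cosine_similarity_distance(expected_slice, output.flatten())
#   assert max_diff < 1e-4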


def print_tensor_test(
    tensor,
    limit_to_slices=None,
    max_torch_print=None,
    filename="test_corrections.txt",
    expected_tensor_name="expected_slice",
):
    if max_torch_print:
        # honor a numeric limit when one is passed; a bare `True` keeps a high threshold
        threshold = 10_000 if isinstance(max_torch_print, bool) else max_torch_print
        torch.set_printoptions(threshold=threshold)

    test_name = os.environ.get("PYTEST_CURRENT_TEST")
    if not torch.is_tensor(tensor):
        tensor = torch.from_numpy(tensor)
    if limit_to_slices:
        tensor = tensor[0, -3:, -3:, -1]

    tensor_str = str(tensor.detach().cpu().flatten().to(torch.float32)).replace("\n", "")
    # format is usually:
    # expected_slice = np.array([-0.5713, -0.3018, -0.9814, 0.04663, -0.879, 0.76, -1.734, 0.1044, 1.161])
    output_str = tensor_str.replace("tensor", f"{expected_tensor_name} = np.array")
    test_file, test_class, test_fn = test_name.split("::")
    test_fn = test_fn.split()[0]
    with open(filename, "a") as f:
        print("::".join([test_file, test_class, test_fn, output_str]), file=f)


def get_tests_dir(append_path=None):
    """
    Args:
        append_path: optional path to append to the tests dir path
    Return:
        The full path to the `tests` dir, so that the tests can be invoked from anywhere. Optionally, `append_path` is
        joined after the `tests` dir if it is provided.
    """
    # this function caller's __file__
    caller__file__ = inspect.stack()[1][1]
    tests_dir = os.path.abspath(os.path.dirname(caller__file__))

    while not tests_dir.endswith("tests"):
        tests_dir = os.path.dirname(tests_dir)

    if append_path:
        return Path(tests_dir, append_path).as_posix()
    else:
        return tests_dir
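
# Usage sketch (the fixture path is hypothetical): resolves relative to the
# enclosing `tests` directory no matter where pytest is invoked from:
#
#   image_path = get_tests_dir("fixtures/sample.png")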


# Taken from the following PR:
# https://github.com/huggingface/accelerate/pull/1964
def str_to_bool(value) -> int:
    """
    Converts a string representation of truth to `True` (1) or `False` (0). True values are `y`, `yes`, `t`, `true`,
    `on`, and `1`; False values are `n`, `no`, `f`, `false`, `off`, and `0`.
    """
    value = value.lower()
    if value in ("y", "yes", "t", "true", "on", "1"):
        return 1
    elif value in ("n", "no", "f", "false", "off", "0"):
        return 0
    else:
        raise ValueError(f"invalid truth value {value}")


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = str_to_bool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_nightly_tests = parse_flag_from_env("RUN_NIGHTLY", default=False)
_run_compile_tests = parse_flag_from_env("RUN_COMPILE", default=False)


def floats_tensor(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.random() * scale)

    return torch.tensor(data=values, dtype=torch.float).view(shape).contiguous()


def slow(test_case):
    """
    Decorator marking a test as slow.

    Slow tests are skipped by default. Set the RUN_SLOW environment variable to a truthy value to run them.

    """
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)
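
# Usage sketch: decorate a test, then opt in at run time:
#
#   @slow
#   def test_full_pipeline(self):
#       ...
#
# and run with `RUN_SLOW=yes pytest tests/`.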


def nightly(test_case):
    """
    Decorator marking a test that runs nightly in the diffusers CI.

    Nightly tests are skipped by default. Set the RUN_NIGHTLY environment variable to a truthy value to run them.

    """
    return unittest.skipUnless(_run_nightly_tests, "test is nightly")(test_case)


def is_torch_compile(test_case):
    """
    Decorator marking a test that runs compile tests in the diffusers CI.

    Compile tests are skipped by default. Set the RUN_COMPILE environment variable to a truthy value to run them.

    """
    return unittest.skipUnless(_run_compile_tests, "test is torch compile")(test_case)


def require_torch(test_case):
    """
    Decorator marking a test that requires PyTorch. These tests are skipped when PyTorch isn't installed.
    """
    return unittest.skipUnless(is_torch_available(), "test requires PyTorch")(test_case)


def require_torch_2(test_case):
    """
    Decorator marking a test that requires PyTorch 2. These tests are skipped when it isn't installed.
    """
    return unittest.skipUnless(is_torch_available() and is_torch_version(">=", "2.0.0"), "test requires PyTorch 2")(
        test_case
    )


def require_torch_version_greater_equal(torch_version):
    """Decorator marking a test that requires torch with a specific version or greater."""

    def decorator(test_case):
        correct_torch_version = is_torch_available() and is_torch_version(">=", torch_version)
        return unittest.skipUnless(
            correct_torch_version, f"test requires torch with the version greater than or equal to {torch_version}"
        )(test_case)

    return decorator
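
# Usage sketch (the version is illustrative):
#
#   @require_torch_version_greater_equal("2.1.0")
#   def test_uses_newer_torch_op(self):
#       ...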


def require_torch_gpu(test_case):
    """Decorator marking a test that requires CUDA and PyTorch."""
    return unittest.skipUnless(is_torch_available() and torch_device == "cuda", "test requires PyTorch+CUDA")(
        test_case
    )


# These decorators are for accelerator-specific behaviours that are not GPU-specific
def require_torch_accelerator(test_case):
    """Decorator marking a test that requires an accelerator backend and PyTorch."""
    return unittest.skipUnless(is_torch_available() and torch_device != "cpu", "test requires accelerator+PyTorch")(
        test_case
    )


def require_torch_multi_gpu(test_case):
    """
    Decorator marking a test that requires a multi-GPU setup (in PyTorch). These tests are skipped on a machine without
    multiple GPUs. To run *only* the multi_gpu tests, assuming all test names contain multi_gpu: $ pytest -sv ./tests
    -k "multi_gpu"
    """
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_torch_accelerator_with_fp16(test_case):
    """Decorator marking a test that requires an accelerator with support for the FP16 data type."""
    return unittest.skipUnless(_is_torch_fp16_available(torch_device), "test requires accelerator with fp16 support")(
        test_case
    )


def require_torch_accelerator_with_fp64(test_case):
    """Decorator marking a test that requires an accelerator with support for the FP64 data type."""
    return unittest.skipUnless(_is_torch_fp64_available(torch_device), "test requires accelerator with fp64 support")(
        test_case
    )


def require_big_gpu_with_torch_cuda(test_case):
    """
    Decorator marking a test that requires a big GPU (`BIG_GPU_MEMORY` GB, 40 by default) for execution. Some example
    pipelines: Flux, SD3, Cog, etc.
    """
    if not is_torch_available():
        return unittest.skip("test requires PyTorch")(test_case)

    import torch

    if not torch.cuda.is_available():
        return unittest.skip("test requires PyTorch CUDA")(test_case)

    device_properties = torch.cuda.get_device_properties(0)
    total_memory = device_properties.total_memory / (1024**3)
    return unittest.skipUnless(
        total_memory >= BIG_GPU_MEMORY, f"test requires a GPU with at least {BIG_GPU_MEMORY} GB memory"
    )(test_case)


def require_torch_accelerator_with_training(test_case):
    """Decorator marking a test that requires an accelerator with support for training."""
    return unittest.skipUnless(
        is_torch_available() and backend_supports_training(torch_device),
        "test requires accelerator with training support",
    )(test_case)


def skip_mps(test_case):
    """Decorator marking a test to skip if torch_device is 'mps'"""
    return unittest.skipUnless(torch_device != "mps", "test requires non 'mps' device")(test_case)


def require_flax(test_case):
    """
    Decorator marking a test that requires JAX & Flax. These tests are skipped when one or both are not installed.
    """
    return unittest.skipUnless(is_flax_available(), "test requires JAX & Flax")(test_case)


def require_compel(test_case):
    """
    Decorator marking a test that requires compel: https://github.com/damian0815/compel. These tests are skipped when
    the library is not installed.
    """
    return unittest.skipUnless(is_compel_available(), "test requires compel")(test_case)


def require_onnxruntime(test_case):
    """
    Decorator marking a test that requires onnxruntime. These tests are skipped when onnxruntime isn't installed.
    """
    return unittest.skipUnless(is_onnx_available(), "test requires onnxruntime")(test_case)


def require_note_seq(test_case):
    """
    Decorator marking a test that requires note_seq. These tests are skipped when note_seq isn't installed.
    """
    return unittest.skipUnless(is_note_seq_available(), "test requires note_seq")(test_case)


def require_accelerator(test_case):
    """
    Decorator marking a test that requires a hardware accelerator backend. These tests are skipped when no hardware
    accelerator is available.
    """
    return unittest.skipUnless(torch_device != "cpu", "test requires a hardware accelerator")(test_case)


def require_torchsde(test_case):
    """
    Decorator marking a test that requires torchsde. These tests are skipped when torchsde isn't installed.
    """
    return unittest.skipUnless(is_torchsde_available(), "test requires torchsde")(test_case)


def require_peft_backend(test_case):
    """
    Decorator marking a test that requires the PEFT backend, which in turn requires specific versions of PEFT and
    transformers.
    """
    return unittest.skipUnless(USE_PEFT_BACKEND, "test requires PEFT backend")(test_case)


def require_timm(test_case):
    """
    Decorator marking a test that requires timm. These tests are skipped when timm isn't installed.
    """
    return unittest.skipUnless(is_timm_available(), "test requires timm")(test_case)


def require_bitsandbytes(test_case):
    """
    Decorator marking a test that requires bitsandbytes. These tests are skipped when bitsandbytes isn't installed.
    """
    return unittest.skipUnless(is_bitsandbytes_available(), "test requires bitsandbytes")(test_case)


def require_accelerate(test_case):
    """
    Decorator marking a test that requires accelerate. These tests are skipped when accelerate isn't installed.
    """
    return unittest.skipUnless(is_accelerate_available(), "test requires accelerate")(test_case)


def require_peft_version_greater(peft_version):
    """
    Decorator marking a test that requires the PEFT backend with a version greater than the specified one, which in
    turn requires specific versions of PEFT and transformers.
    """

    def decorator(test_case):
        correct_peft_version = is_peft_available() and version.parse(
            version.parse(importlib.metadata.version("peft")).base_version
        ) > version.parse(peft_version)
        return unittest.skipUnless(
            correct_peft_version, f"test requires PEFT backend with the version greater than {peft_version}"
        )(test_case)

    return decorator


def require_transformers_version_greater(transformers_version):
    """
    Decorator marking a test that requires transformers with a version greater than the specified one. These tests are
    skipped otherwise.
    """

    def decorator(test_case):
        correct_transformers_version = is_transformers_available() and version.parse(
            version.parse(importlib.metadata.version("transformers")).base_version
        ) > version.parse(transformers_version)
        return unittest.skipUnless(
            correct_transformers_version,
            f"test requires transformers with the version greater than {transformers_version}",
        )(test_case)

    return decorator


def require_accelerate_version_greater(accelerate_version):
    def decorator(test_case):
        correct_accelerate_version = is_accelerate_available() and version.parse(
            version.parse(importlib.metadata.version("accelerate")).base_version
        ) > version.parse(accelerate_version)
        return unittest.skipUnless(
            correct_accelerate_version, f"Test requires accelerate with the version greater than {accelerate_version}."
        )(test_case)

    return decorator


def require_bitsandbytes_version_greater(bnb_version):
    def decorator(test_case):
        correct_bnb_version = is_bitsandbytes_available() and version.parse(
            version.parse(importlib.metadata.version("bitsandbytes")).base_version
        ) > version.parse(bnb_version)
        return unittest.skipUnless(
            correct_bnb_version, f"Test requires bitsandbytes with the version greater than {bnb_version}."
        )(test_case)

    return decorator


def require_gguf_version_greater_or_equal(gguf_version):
    def decorator(test_case):
        correct_gguf_version = is_gguf_available() and version.parse(
            version.parse(importlib.metadata.version("gguf")).base_version
        ) >= version.parse(gguf_version)
        return unittest.skipUnless(
            correct_gguf_version, f"Test requires gguf with the version greater than {gguf_version}."
        )(test_case)

    return decorator


def require_torchao_version_greater(torchao_version):
    def decorator(test_case):
        correct_torchao_version = is_torchao_available() and version.parse(
            version.parse(importlib.metadata.version("torchao")).base_version
        ) > version.parse(torchao_version)
        return unittest.skipUnless(
            correct_torchao_version, f"Test requires torchao with version greater than {torchao_version}."
        )(test_case)

    return decorator


def deprecate_after_peft_backend(test_case):
    """
    Decorator marking a test that is skipped once the PEFT backend is in use.
    """
    return unittest.skipUnless(not USE_PEFT_BACKEND, "test skipped in favor of PEFT backend")(test_case)


def get_python_version():
    sys_info = sys.version_info
    major, minor = sys_info.major, sys_info.minor
    return major, minor


def load_numpy(arry: Union[str, np.ndarray], local_path: Optional[str] = None) -> np.ndarray:
    if isinstance(arry, str):
        if local_path is not None:
            # `local_path` can be passed to point test assets at a local directory; note that this branch returns the
            # remapped local path rather than loading the array
            return Path(local_path, arry.split("/")[-5], arry.split("/")[-2], arry.split("/")[-1]).as_posix()
        elif arry.startswith("http://") or arry.startswith("https://"):
            response = requests.get(arry)
            response.raise_for_status()
            arry = np.load(BytesIO(response.content))
        elif os.path.isfile(arry):
            arry = np.load(arry)
        else:
            raise ValueError(
                f"Incorrect path or url, URLs must start with `http://` or `https://`, and {arry} is not a valid path"
            )
    elif isinstance(arry, np.ndarray):
        pass
    else:
        raise ValueError(
            "Incorrect format used for numpy ndarray. Should be an url linking to an image, a local path, or a"
            " ndarray."
        )

    return arry


def load_pt(url: str):
    response = requests.get(url)
    response.raise_for_status()
    arry = torch.load(BytesIO(response.content))
    return arry


def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image:
    """
    Loads `image` to a PIL Image.

    Args:
        image (`str` or `PIL.Image.Image`):
            The image to convert to the PIL Image format.
    Returns:
        `PIL.Image.Image`:
            A PIL Image.
    """
    if isinstance(image, str):
        if image.startswith("http://") or image.startswith("https://"):
            image = PIL.Image.open(requests.get(image, stream=True).raw)
        elif os.path.isfile(image):
            image = PIL.Image.open(image)
        else:
            raise ValueError(
                f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path"
            )
    elif isinstance(image, PIL.Image.Image):
        pass
    else:
        raise ValueError(
            "Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image."
        )
    image = PIL.ImageOps.exif_transpose(image)
    image = image.convert("RGB")
    return image


def preprocess_image(image: PIL.Image.Image, batch_size: int):
    w, h = image.size
    w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = np.vstack([image[None].transpose(0, 3, 1, 2)] * batch_size)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


def export_to_gif(image: List[PIL.Image.Image], output_gif_path: str = None) -> str:
    if output_gif_path is None:
        output_gif_path = tempfile.NamedTemporaryFile(suffix=".gif").name

    image[0].save(
        output_gif_path,
        save_all=True,
        append_images=image[1:],
        optimize=False,
        duration=100,
        loop=0,
    )
    return output_gif_path


@contextmanager
def buffered_writer(raw_f):
    f = io.BufferedWriter(raw_f)
    yield f
    f.flush()


def export_to_ply(mesh, output_ply_path: str = None):
    """
    Write a PLY file for a mesh.
    """
    if output_ply_path is None:
        output_ply_path = tempfile.NamedTemporaryFile(suffix=".ply").name

    coords = mesh.verts.detach().cpu().numpy()
    faces = mesh.faces.cpu().numpy()
    rgb = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)

    with buffered_writer(open(output_ply_path, "wb")) as f:
        f.write(b"ply\n")
        f.write(b"format binary_little_endian 1.0\n")
        f.write(bytes(f"element vertex {len(coords)}\n", "ascii"))
        f.write(b"property float x\n")
        f.write(b"property float y\n")
        f.write(b"property float z\n")
        if rgb is not None:
            f.write(b"property uchar red\n")
            f.write(b"property uchar green\n")
            f.write(b"property uchar blue\n")
        if faces is not None:
            f.write(bytes(f"element face {len(faces)}\n", "ascii"))
            f.write(b"property list uchar int vertex_index\n")
        f.write(b"end_header\n")

        if rgb is not None:
            rgb = (rgb * 255.499).round().astype(int)
            vertices = [
                (*coord, *rgb)
                for coord, rgb in zip(
                    coords.tolist(),
                    rgb.tolist(),
                )
            ]
            format = struct.Struct("<3f3B")
            for item in vertices:
                f.write(format.pack(*item))
        else:
            format = struct.Struct("<3f")
            for vertex in coords.tolist():
                f.write(format.pack(*vertex))

        if faces is not None:
            format = struct.Struct("<B3I")
            for tri in faces.tolist():
                f.write(format.pack(len(tri), *tri))

    return output_ply_path


def export_to_obj(mesh, output_obj_path: str = None):
    if output_obj_path is None:
        output_obj_path = tempfile.NamedTemporaryFile(suffix=".obj").name

    verts = mesh.verts.detach().cpu().numpy()
    faces = mesh.faces.cpu().numpy()

    vertex_colors = np.stack([mesh.vertex_channels[x].detach().cpu().numpy() for x in "RGB"], axis=1)
    vertices = [
        "{} {} {} {} {} {}".format(*coord, *color) for coord, color in zip(verts.tolist(), vertex_colors.tolist())
    ]

    faces = ["f {} {} {}".format(str(tri[0] + 1), str(tri[1] + 1), str(tri[2] + 1)) for tri in faces.tolist()]

    combined_data = ["v " + vertex for vertex in vertices] + faces

    with open(output_obj_path, "w") as f:
        f.writelines("\n".join(combined_data))


def export_to_video(video_frames: List[np.ndarray], output_video_path: str = None) -> str:
    if is_opencv_available():
        import cv2
    else:
        raise ImportError(BACKENDS_MAPPING["opencv"][1].format("export_to_video"))
    if output_video_path is None:
        output_video_path = tempfile.NamedTemporaryFile(suffix=".mp4").name

    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    h, w, c = video_frames[0].shape
    video_writer = cv2.VideoWriter(output_video_path, fourcc, fps=8, frameSize=(w, h))
    for i in range(len(video_frames)):
        img = cv2.cvtColor(video_frames[i], cv2.COLOR_RGB2BGR)
        video_writer.write(img)
    return output_video_path


def load_hf_numpy(path) -> np.ndarray:
    base_url = "https://huggingface.co/datasets/fusing/diffusers-testing/resolve/main"

    if not path.startswith("http://") and not path.startswith("https://"):
        path = os.path.join(base_url, urllib.parse.quote(path))

    return load_numpy(path)


# --- pytest conf functions --- #

# to avoid multiple invocation from tests/conftest.py and examples/conftest.py - make sure it's called only once
pytest_opt_registered = {}


def pytest_addoption_shared(parser):
    """
    This function is to be called from `conftest.py` via `pytest_addoption` wrapper that has to be defined there.

    It allows loading both `conftest.py` files at once without causing a failure due to adding the same `pytest`
    option.

    """
    option = "--make-reports"
    if option not in pytest_opt_registered:
        parser.addoption(
            option,
            action="store",
            default=False,
            help="generate report files. The value of this option is used as a prefix to report names",
        )
        pytest_opt_registered[option] = 1
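
# Sketch of the `conftest.py` wrappers referred to above (assumed wiring;
# adjust the import path to your repository layout):
#
#   def pytest_addoption(parser):
#       from diffusers.utils.testing_utils import pytest_addoption_shared
#
#       pytest_addoption_shared(parser)
#
#
#   def pytest_terminal_summary(terminalreporter):
#       from diffusers.utils.testing_utils import pytest_terminal_summary_main
#
#       make_reports = terminalreporter.config.getoption("--make-reports")
#       if make_reports:
#           pytest_terminal_summary_main(terminalreporter, id=make_reports)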


def pytest_terminal_summary_main(tr, id):
    """
    Generate multiple reports at the end of test suite run - each report goes into a dedicated file in the current
    directory. The report files are prefixed with the test suite name.

    This function emulates the --durations and -rA pytest arguments.

    This function is to be called from `conftest.py` via `pytest_terminal_summary` wrapper that has to be defined
    there.

    Args:
    - tr: `terminalreporter` passed from `conftest.py`
    - id: unique id like `tests` or `examples` that will be incorporated into the final reports filenames - this is
      needed as some jobs have multiple runs of pytest, so we can't have them overwrite each other.

    NB: this function taps into a private _pytest API and, while unlikely, it could break should pytest make internal
    changes - it also calls default internal methods of terminalreporter which can be hijacked by various `pytest-`
    plugins and interfere.

    """
    from _pytest.config import create_terminal_writer

    if not len(id):
        id = "tests"

    config = tr.config
    orig_writer = config.get_terminal_writer()
    orig_tbstyle = config.option.tbstyle
    orig_reportchars = tr.reportchars

    dir = "reports"
    Path(dir).mkdir(parents=True, exist_ok=True)
    report_files = {
        k: f"{dir}/{id}_{k}.txt"
        for k in [
            "durations",
            "errors",
            "failures_long",
            "failures_short",
            "failures_line",
            "passes",
            "stats",
            "summary_short",
            "warnings",
        ]
    }

    # custom durations report
    # note: there is no need to call pytest --durations=XX to get this separate report
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/runner.py#L66
    dlist = []
    for replist in tr.stats.values():
        for rep in replist:
            if hasattr(rep, "duration"):
                dlist.append(rep)
    if dlist:
        dlist.sort(key=lambda x: x.duration, reverse=True)
        with open(report_files["durations"], "w") as f:
            durations_min = 0.05  # sec
            f.write("slowest durations\n")
            for i, rep in enumerate(dlist):
                if rep.duration < durations_min:
                    f.write(f"{len(dlist)-i} durations < {durations_min} secs were omitted")
                    break
                f.write(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}\n")

    def summary_failures_short(tr):
        # expecting that the reports were --tb=long (default) so we chop them off here to the last frame
        reports = tr.getreports("failed")
        if not reports:
            return
        tr.write_sep("=", "FAILURES SHORT STACK")
        for rep in reports:
            msg = tr._getfailureheadline(rep)
            tr.write_sep("_", msg, red=True, bold=True)
            # chop off the optional leading extra frames, leaving only the last one
            longrepr = re.sub(r".*_ _ _ (_ ){10,}_ _ ", "", rep.longreprtext, 0, re.M | re.S)
            tr._tw.line(longrepr)
            # note: not printing out any rep.sections to keep the report short

    # use ready-made report funcs, we are just hijacking the filehandle to log to a dedicated file each
    # adapted from https://github.com/pytest-dev/pytest/blob/897f151e/src/_pytest/terminal.py#L814
    # note: some pytest plugins may interfere by hijacking the default `terminalreporter` (e.g.
    # pytest-instafail does that)

    # report failures with line/short/long styles
    config.option.tbstyle = "auto"  # full tb
    with open(report_files["failures_long"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    # config.option.tbstyle = "short" # short tb
    with open(report_files["failures_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        summary_failures_short(tr)

    config.option.tbstyle = "line"  # one line per error
    with open(report_files["failures_line"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_failures()

    with open(report_files["errors"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_errors()

    with open(report_files["warnings"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_warnings()  # normal warnings
        tr.summary_warnings()  # final warnings

    tr.reportchars = "wPpsxXEf"  # emulate -rA (used in summary_passes() and short_test_summary())
    with open(report_files["passes"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_passes()

    with open(report_files["summary_short"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.short_test_summary()

    with open(report_files["stats"], "w") as f:
        tr._tw = create_terminal_writer(config, f)
        tr.summary_stats()

    # restore:
    tr._tw = orig_writer
    tr.reportchars = orig_reportchars
    config.option.tbstyle = orig_tbstyle


# Copied from https://github.com/huggingface/transformers/blob/000e52aec8850d3fe2f360adc6fd256e5b47fe4c/src/transformers/testing_utils.py#L1905
def is_flaky(max_attempts: int = 5, wait_before_retry: Optional[float] = None, description: Optional[str] = None):
    """
    To decorate flaky tests. They will be retried on failures.

    Args:
        max_attempts (`int`, *optional*, defaults to 5):
            The maximum number of attempts to retry the flaky test.
        wait_before_retry (`float`, *optional*):
            If provided, will wait that number of seconds before retrying the test.
        description (`str`, *optional*):
            A string to describe the situation (what / where / why is flaky, link to GH issue/PR comments, errors,
            etc.)
    """

    def decorator(test_func_ref):
        @functools.wraps(test_func_ref)
        def wrapper(*args, **kwargs):
            retry_count = 1

            while retry_count < max_attempts:
                try:
                    return test_func_ref(*args, **kwargs)

                except Exception as err:
                    print(f"Test failed with {err} at try {retry_count}/{max_attempts}.", file=sys.stderr)
                    if wait_before_retry is not None:
                        time.sleep(wait_before_retry)
                    retry_count += 1

            return test_func_ref(*args, **kwargs)

        return wrapper

    return decorator
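
# Usage sketch (values and description are illustrative):
#
#   @is_flaky(max_attempts=3, wait_before_retry=1.0, description="non-deterministic scheduler output")
#   def test_occasionally_flaky(self):
#       ...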


# Taken from: https://github.com/huggingface/transformers/blob/3658488ff77ff8d45101293e749263acf437f4d5/src/transformers/testing_utils.py#L1787
def run_test_in_subprocess(test_case, target_func, inputs=None, timeout=None):
    """
    To run a test in a subprocess. In particular, this can avoid (GPU) memory issues.

    Args:
        test_case (`unittest.TestCase`):
            The test that will run `target_func`.
        target_func (`Callable`):
            The function implementing the actual testing logic.
        inputs (`dict`, *optional*, defaults to `None`):
            The inputs that will be passed to `target_func` through an (input) queue.
        timeout (`int`, *optional*, defaults to `None`):
            The timeout (in seconds) that will be passed to the input and output queues. If not specified, the env.
            variable `PYTEST_TIMEOUT` will be checked. If still `None`, its value will be set to `600`.
    """
    if timeout is None:
        timeout = int(os.environ.get("PYTEST_TIMEOUT", 600))

    start_methohd = "spawn"
    ctx = multiprocessing.get_context(start_methohd)

    input_queue = ctx.Queue(1)
    output_queue = ctx.JoinableQueue(1)

    # We can't send `unittest.TestCase` to the child, otherwise we get issues regarding pickle.
    input_queue.put(inputs, timeout=timeout)

    process = ctx.Process(target=target_func, args=(input_queue, output_queue, timeout))
    process.start()
    # Kill the child process if we can't get outputs from it in time: otherwise, the hanging subprocess prevents
    # the test from exiting properly.
    try:
        results = output_queue.get(timeout=timeout)
        output_queue.task_done()
    except Exception as e:
        process.terminate()
        test_case.fail(e)
    process.join(timeout=timeout)

    if results["error"] is not None:
        test_case.fail(f'{results["error"]}')
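
# Sketch of a compatible `target_func` (all names are illustrative): it drains
# the input queue, runs the checks, and reports back through the joinable
# output queue with an "error" key, matching the protocol above:
#
#   import traceback
#
#   def _test_fn(in_queue, out_queue, timeout):
#       error = None
#       try:
#           inputs = in_queue.get(timeout=timeout)
#           assert inputs["output"] == inputs["expected"]
#       except Exception:
#           error = f"{traceback.format_exc()}"
#       out_queue.put({"error": error}, timeout=timeout)
#       out_queue.join()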


class CaptureLogger:
    """
    Args:
    Context manager to capture `logging` streams
        logger: 'logging` logger object
    Returns:
        The captured output is available via `self.out`
    Example:
    ```python
    >>> from diffusers import logging
    >>> from diffusers.testing_utils import CaptureLogger

    >>> msg = "Testing 1, 2, 3"
    >>> logging.set_verbosity_info()
    >>> logger = logging.get_logger("diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.py")
    >>> with CaptureLogger(logger) as cl:
    ...     logger.info(msg)
    >>> assert cl.out == msg + "\n"
    ```
    """

    def __init__(self, logger):
        self.logger = logger
        self.io = StringIO()
        self.sh = logging.StreamHandler(self.io)
        self.out = ""

    def __enter__(self):
        self.logger.addHandler(self.sh)
        return self

    def __exit__(self, *exc):
        self.logger.removeHandler(self.sh)
        self.out = self.io.getvalue()

    def __repr__(self):
        return f"captured: {self.out}\n"


def enable_full_determinism():
    """
    Helper function for reproducible behavior during distributed training. See
    https://pytorch.org/docs/stable/notes/randomness.html for PyTorch.
    """
    #  Enable PyTorch deterministic mode. This potentially requires either the environment
    #  variable 'CUDA_LAUNCH_BLOCKING' or 'CUBLAS_WORKSPACE_CONFIG' to be set,
    # depending on the CUDA version, so we set them both here
    os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":16:8"
    torch.use_deterministic_algorithms(True)

    # Enable CUDNN deterministic mode
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    torch.backends.cuda.matmul.allow_tf32 = False


def disable_full_determinism():
    os.environ["CUDA_LAUNCH_BLOCKING"] = "0"
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ""
    torch.use_deterministic_algorithms(False)


# Utils for custom and alternative accelerator devices
def _is_torch_fp16_available(device):
    if not is_torch_available():
        return False

    import torch

    device = torch.device(device)

    try:
        x = torch.zeros((2, 2), dtype=torch.float16).to(device)
        _ = torch.mul(x, x)
        return True

    except Exception as e:
        if device.type == "cuda":
            raise ValueError(
                f"You have passed a device of type 'cuda' which should work with 'fp16', but 'cuda' does not seem to be correctly installed on your machine: {e}"
            )

        return False


def _is_torch_fp64_available(device):
    if not is_torch_available():
        return False

    import torch

    device = torch.device(device)

    try:
        x = torch.zeros((2, 2), dtype=torch.float64).to(device)
        _ = torch.mul(x, x)
        return True

    except Exception as e:
        if device.type == "cuda":
            raise ValueError(
                f"You have passed a device of type 'cuda' which should work with 'fp64', but 'cuda' does not seem to be correctly installed on your machine: {e}"
            )

        return False


# Guard these lookups for when Torch is not used - alternative accelerator support is for PyTorch
if is_torch_available():
    # Behaviour flags
    BACKEND_SUPPORTS_TRAINING = {"cuda": True, "cpu": True, "mps": False, "default": True}

    # Function definitions
    BACKEND_EMPTY_CACHE = {"cuda": torch.cuda.empty_cache, "cpu": None, "mps": None, "default": None}
    BACKEND_DEVICE_COUNT = {"cuda": torch.cuda.device_count, "cpu": lambda: 0, "mps": lambda: 0, "default": lambda: 0}
    BACKEND_MANUAL_SEED = {"cuda": torch.cuda.manual_seed, "cpu": torch.manual_seed, "default": torch.manual_seed}


# This dispatches a defined function according to the accelerator from the function definitions.
def _device_agnostic_dispatch(device: str, dispatch_table: Dict[str, Callable], *args, **kwargs):
    if device not in dispatch_table:
        device = "default"

    fn = dispatch_table[device]

    # Some device agnostic functions return values. Need to guard against 'None' instead at
    # user level
    if fn is None:
        return None

    return fn(*args, **kwargs)


# These are callables which automatically dispatch the function specific to the accelerator
def backend_manual_seed(device: str, seed: int):
    return _device_agnostic_dispatch(device, BACKEND_MANUAL_SEED, seed)


def backend_empty_cache(device: str):
    return _device_agnostic_dispatch(device, BACKEND_EMPTY_CACHE)


def backend_device_count(device: str):
    return _device_agnostic_dispatch(device, BACKEND_DEVICE_COUNT)


# These are callables which return boolean behaviour flags and can be used to specify some
# device agnostic alternative where the feature is unsupported.
def backend_supports_training(device: str):
    if not is_torch_available():
        return False

    if device not in BACKEND_SUPPORTS_TRAINING:
        device = "default"

    return BACKEND_SUPPORTS_TRAINING[device]


# Guard for when Torch is not available
if is_torch_available():
    # Update device function dict mapping
    def update_mapping_from_spec(device_fn_dict: Dict[str, Callable], attribute_name: str):
        try:
            # Try to import the function directly
            spec_fn = getattr(device_spec_module, attribute_name)
            device_fn_dict[torch_device] = spec_fn
        except AttributeError as e:
            # If the function doesn't exist, and there is no default, throw an error
            if "default" not in device_fn_dict:
                raise AttributeError(
                    f"`{attribute_name}` not found in '{device_spec_path}' and no default fallback function found."
                ) from e

    if "DIFFUSERS_TEST_DEVICE_SPEC" in os.environ:
        device_spec_path = os.environ["DIFFUSERS_TEST_DEVICE_SPEC"]
        if not Path(device_spec_path).is_file():
            raise ValueError(f"Specified path to device specification file is not found. Received {device_spec_path}")

        try:
            import_name = device_spec_path[: device_spec_path.index(".py")]
        except ValueError as e:
            raise ValueError(f"Provided device spec file is not a Python file! Received {device_spec_path}") from e

        device_spec_module = importlib.import_module(import_name)

        try:
            device_name = device_spec_module.DEVICE_NAME
        except AttributeError:
            raise AttributeError("Device spec file did not contain `DEVICE_NAME`")

        if "DIFFUSERS_TEST_DEVICE" in os.environ and torch_device != device_name:
            msg = f"Mismatch between environment variable `DIFFUSERS_TEST_DEVICE` '{torch_device}' and device found in spec '{device_name}'\n"
            msg += "Either unset `DIFFUSERS_TEST_DEVICE` or ensure it matches device spec name."
            raise ValueError(msg)

        torch_device = device_name

        # Add one entry here for each `BACKEND_*` dictionary.
        update_mapping_from_spec(BACKEND_MANUAL_SEED, "MANUAL_SEED_FN")
        update_mapping_from_spec(BACKEND_EMPTY_CACHE, "EMPTY_CACHE_FN")
        update_mapping_from_spec(BACKEND_DEVICE_COUNT, "DEVICE_COUNT_FN")
        update_mapping_from_spec(BACKEND_SUPPORTS_TRAINING, "SUPPORTS_TRAINING")
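
# Sketch of a minimal device spec file consumed via `DIFFUSERS_TEST_DEVICE_SPEC`
# (the device name and functions are illustrative):
#
#   # my_device_spec.py
#   import torch
#
#   DEVICE_NAME = "privateuseone"
#   MANUAL_SEED_FN = torch.manual_seed
#   EMPTY_CACHE_FN = None
#   DEVICE_COUNT_FN = lambda: 1
#   SUPPORTS_TRAINING = True
#
# and run with `DIFFUSERS_TEST_DEVICE_SPEC=my_device_spec.py pytest tests/`.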