# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Import utilities: Utilities related to imports and our lazy inits.
"""

import importlib.util
import operator as op
import os
import sys
from collections import OrderedDict
from itertools import chain
from types import ModuleType
from typing import Any, Union

from huggingface_hub.utils import is_jinja_available  # noqa: F401
from packaging.version import Version, parse

from . import logging


# The package importlib_metadata is in a different place, depending on the python version.
if sys.version_info < (3, 8):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})

USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper()
USE_SAFETENSORS = os.environ.get("USE_SAFETENSORS", "AUTO").upper()
DIFFUSERS_SLOW_IMPORT = os.environ.get("DIFFUSERS_SLOW_IMPORT", "FALSE").upper()
DIFFUSERS_SLOW_IMPORT = DIFFUSERS_SLOW_IMPORT in ENV_VARS_TRUE_VALUES

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

_is_google_colab = "google.colab" in sys.modules or any(k.startswith("COLAB_") for k in os.environ)


def _is_package_available(pkg_name: str):
    pkg_exists = importlib.util.find_spec(pkg_name) is not None
    pkg_version = "N/A"

    if pkg_exists:
        try:
            pkg_version = importlib_metadata.version(pkg_name)
            logger.debug(f"Successfully imported {pkg_name} version {pkg_version}")
        except (ImportError, importlib_metadata.PackageNotFoundError):
            pkg_exists = False

    return pkg_exists, pkg_version
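
# Example (illustrative): `_is_package_available` returns an availability flag plus a
# version string ("N/A" when the distribution metadata cannot be found). The package
# name below is purely for demonstration.
#
#   _numpy_available, _numpy_version = _is_package_available("numpy")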


if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
    _torch_available, _torch_version = _is_package_available("torch")

else:
    logger.info("Disabling PyTorch because USE_TORCH is set")
    _torch_available = False

_jax_version = "N/A"
_flax_version = "N/A"
if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
    _flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None
    if _flax_available:
        try:
            _jax_version = importlib_metadata.version("jax")
            _flax_version = importlib_metadata.version("flax")
            logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.")
        except importlib_metadata.PackageNotFoundError:
            _flax_available = False
else:
    _flax_available = False

if USE_SAFETENSORS in ENV_VARS_TRUE_AND_AUTO_VALUES:
    _safetensors_available, _safetensors_version = _is_package_available("safetensors")
else:
    logger.info("Disabling Safetensors because USE_SAFETENSORS is set")
    _safetensors_available = False

_onnxruntime_version = "N/A"
_onnx_available = importlib.util.find_spec("onnxruntime") is not None
if _onnx_available:
    candidates = (
        "onnxruntime",
        "onnxruntime-cann",
        "onnxruntime-directml",
        "ort_nightly_directml",
        "onnxruntime-gpu",
        "ort_nightly_gpu",
        "onnxruntime-migraphx",
        "onnxruntime-openvino",
        "onnxruntime-qnn",
        "onnxruntime-rocm",
        "onnxruntime-training",
        "onnxruntime-vitisai",
    )
    _onnxruntime_version = None
    # For the metadata, we have to look for both onnxruntime and onnxruntime-x
    for pkg in candidates:
        try:
            _onnxruntime_version = importlib_metadata.version(pkg)
            break
        except importlib_metadata.PackageNotFoundError:
            pass
    _onnx_available = _onnxruntime_version is not None
    if _onnx_available:
        logger.debug(f"Successfully imported onnxruntime version {_onnxruntime_version}")

# (sayakpaul): importlib.util.find_spec("opencv-python") returns None even when it's installed.
# _opencv_available = importlib.util.find_spec("opencv-python") is not None
try:
    candidates = (
        "opencv-python",
        "opencv-contrib-python",
        "opencv-python-headless",
        "opencv-contrib-python-headless",
    )
    _opencv_version = None
    for pkg in candidates:
        try:
            _opencv_version = importlib_metadata.version(pkg)
            break
        except importlib_metadata.PackageNotFoundError:
            pass
    _opencv_available = _opencv_version is not None
    if _opencv_available:
        logger.debug(f"Successfully imported cv2 version {_opencv_version}")
except importlib_metadata.PackageNotFoundError:
    _opencv_available = False

_bs4_available = importlib.util.find_spec("bs4") is not None
try:
    # importlib metadata under different name
    _bs4_version = importlib_metadata.version("beautifulsoup4")
    logger.debug(f"Successfully imported ftfy version {_bs4_version}")
except importlib_metadata.PackageNotFoundError:
    _bs4_available = False

_invisible_watermark_available = importlib.util.find_spec("imwatermark") is not None
try:
    _invisible_watermark_version = importlib_metadata.version("invisible-watermark")
    logger.debug(f"Successfully imported invisible-watermark version {_invisible_watermark_version}")
except importlib_metadata.PackageNotFoundError:
    _invisible_watermark_available = False

_torch_xla_available, _torch_xla_version = _is_package_available("torch_xla")
_torch_npu_available, _torch_npu_version = _is_package_available("torch_npu")
_transformers_available, _transformers_version = _is_package_available("transformers")
_hf_hub_available, _hf_hub_version = _is_package_available("huggingface_hub")
_inflect_available, _inflect_version = _is_package_available("inflect")
_unidecode_available, _unidecode_version = _is_package_available("unidecode")
_k_diffusion_available, _k_diffusion_version = _is_package_available("k_diffusion")
_note_seq_available, _note_seq_version = _is_package_available("note_seq")
_wandb_available, _wandb_version = _is_package_available("wandb")
_tensorboard_available, _tensorboard_version = _is_package_available("tensorboard")
_compel_available, _compel_version = _is_package_available("compel")
_sentencepiece_available, _sentencepiece_version = _is_package_available("sentencepiece")
_torchsde_available, _torchsde_version = _is_package_available("torchsde")
_peft_available, _peft_version = _is_package_available("peft")
_torchvision_available, _torchvision_version = _is_package_available("torchvision")
_matplotlib_available, _matplotlib_version = _is_package_available("matplotlib")
_timm_available, _timm_version = _is_package_available("timm")
_bitsandbytes_available, _bitsandbytes_version = _is_package_available("bitsandbytes")
_imageio_available, _imageio_version = _is_package_available("imageio")
_ftfy_available, _ftfy_version = _is_package_available("ftfy")
_scipy_available, _scipy_version = _is_package_available("scipy")
_librosa_available, _librosa_version = _is_package_available("librosa")
_accelerate_available, _accelerate_version = _is_package_available("accelerate")
_xformers_available, _xformers_version = _is_package_available("xformers")
_gguf_available, _gguf_version = _is_package_available("gguf")
_torchao_available, _torchao_version = _is_package_available("torchao")

_optimum_quanto_available = importlib.util.find_spec("optimum") is not None
if _optimum_quanto_available:
    try:
        _optimum_quanto_version = importlib_metadata.version("optimum_quanto")
        logger.debug(f"Successfully import optimum-quanto version {_optimum_quanto_version}")
    except importlib_metadata.PackageNotFoundError:
        _optimum_quanto_available = False


def is_torch_available():
    return _torch_available


def is_torch_xla_available():
    return _torch_xla_available


def is_torch_npu_available():
    return _torch_npu_available


def is_flax_available():
    return _flax_available


def is_transformers_available():
    return _transformers_available


def is_inflect_available():
    return _inflect_available


def is_unidecode_available():
    return _unidecode_available


def is_onnx_available():
    return _onnx_available


def is_opencv_available():
    return _opencv_available


def is_scipy_available():
    return _scipy_available


def is_librosa_available():
    return _librosa_available


def is_xformers_available():
    return _xformers_available


def is_accelerate_available():
    return _accelerate_available


def is_k_diffusion_available():
    return _k_diffusion_available


def is_note_seq_available():
    return _note_seq_available


def is_wandb_available():
    return _wandb_available


def is_tensorboard_available():
    return _tensorboard_available


def is_compel_available():
    return _compel_available


def is_ftfy_available():
    return _ftfy_available


def is_bs4_available():
    return _bs4_available


def is_torchsde_available():
    return _torchsde_available


def is_invisible_watermark_available():
    return _invisible_watermark_available


def is_peft_available():
    return _peft_available


def is_torchvision_available():
    return _torchvision_available


def is_matplotlib_available():
    return _matplotlib_available


def is_safetensors_available():
    return _safetensors_available


def is_bitsandbytes_available():
    return _bitsandbytes_available


def is_google_colab():
    return _is_google_colab


def is_sentencepiece_available():
    return _sentencepiece_available


def is_imageio_available():
    return _imageio_available


def is_gguf_available():
    return _gguf_available


def is_torchao_available():
    return _torchao_available


def is_optimum_quanto_available():
    return _optimum_quanto_available


def is_timm_available():
    return _timm_available


# docstyle-ignore
FLAX_IMPORT_ERROR = """
{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the
installation page: https://github.com/google/flax and follow the ones that match your environment.
"""

# docstyle-ignore
INFLECT_IMPORT_ERROR = """
{0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install
inflect`
"""

# docstyle-ignore
PYTORCH_IMPORT_ERROR = """
{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the
installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
"""

# docstyle-ignore
ONNX_IMPORT_ERROR = """
{0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip
install onnxruntime`
"""

# docstyle-ignore
OPENCV_IMPORT_ERROR = """
{0} requires the OpenCV library but it was not found in your environment. You can install it with pip: `pip
install opencv-python`
"""

# docstyle-ignore
SCIPY_IMPORT_ERROR = """
{0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install
scipy`
"""

# docstyle-ignore
LIBROSA_IMPORT_ERROR = """
{0} requires the librosa library but it was not found in your environment.  Checkout the instructions on the
installation page: https://librosa.org/doc/latest/install.html and follow the ones that match your environment.
"""

# docstyle-ignore
TRANSFORMERS_IMPORT_ERROR = """
{0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip
install transformers`
"""

# docstyle-ignore
UNIDECODE_IMPORT_ERROR = """
{0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install
Unidecode`
"""

# docstyle-ignore
K_DIFFUSION_IMPORT_ERROR = """
{0} requires the k-diffusion library but it was not found in your environment. You can install it with pip: `pip
install k-diffusion`
"""

# docstyle-ignore
NOTE_SEQ_IMPORT_ERROR = """
{0} requires the note-seq library but it was not found in your environment. You can install it with pip: `pip
install note-seq`
"""

# docstyle-ignore
WANDB_IMPORT_ERROR = """
{0} requires the wandb library but it was not found in your environment. You can install it with pip: `pip
install wandb`
"""

# docstyle-ignore
TENSORBOARD_IMPORT_ERROR = """
{0} requires the tensorboard library but it was not found in your environment. You can install it with pip: `pip
install tensorboard`
"""


# docstyle-ignore
COMPEL_IMPORT_ERROR = """
{0} requires the compel library but it was not found in your environment. You can install it with pip: `pip install compel`
"""

# docstyle-ignore
BS4_IMPORT_ERROR = """
{0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip:
`pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation.
"""

# docstyle-ignore
FTFY_IMPORT_ERROR = """
{0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the
installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones
that match your environment. Please note that you may need to restart your runtime after installation.
"""

# docstyle-ignore
TORCHSDE_IMPORT_ERROR = """
{0} requires the torchsde library but it was not found in your environment. You can install it with pip: `pip install torchsde`
"""

# docstyle-ignore
INVISIBLE_WATERMARK_IMPORT_ERROR = """
{0} requires the invisible-watermark library but it was not found in your environment. You can install it with pip: `pip install invisible-watermark>=0.2.0`
"""

# docstyle-ignore
PEFT_IMPORT_ERROR = """
{0} requires the peft library but it was not found in your environment. You can install it with pip: `pip install peft`
"""

# docstyle-ignore
SAFETENSORS_IMPORT_ERROR = """
{0} requires the safetensors library but it was not found in your environment. You can install it with pip: `pip install safetensors`
"""

# docstyle-ignore
SENTENCEPIECE_IMPORT_ERROR = """
{0} requires the sentencepiece library but it was not found in your environment. You can install it with pip: `pip install sentencepiece`
"""


# docstyle-ignore
BITSANDBYTES_IMPORT_ERROR = """
{0} requires the bitsandbytes library but it was not found in your environment. You can install it with pip: `pip install bitsandbytes`
"""

# docstyle-ignore
IMAGEIO_IMPORT_ERROR = """
{0} requires the imageio library and ffmpeg but it was not found in your environment. You can install it with pip: `pip install imageio imageio-ffmpeg`
"""

# docstyle-ignore
GGUF_IMPORT_ERROR = """
{0} requires the gguf library but it was not found in your environment. You can install it with pip: `pip install gguf`
"""

TORCHAO_IMPORT_ERROR = """
{0} requires the torchao library but it was not found in your environment. You can install it with pip: `pip install
torchao`
"""

QUANTO_IMPORT_ERROR = """
{0} requires the optimum-quanto library but it was not found in your environment. You can install it with pip: `pip
install optimum-quanto`
"""

BACKENDS_MAPPING = OrderedDict(
    [
        ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)),
        ("flax", (is_flax_available, FLAX_IMPORT_ERROR)),
        ("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)),
        ("onnx", (is_onnx_available, ONNX_IMPORT_ERROR)),
        ("opencv", (is_opencv_available, OPENCV_IMPORT_ERROR)),
        ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)),
        ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)),
        ("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)),
        ("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)),
        ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)),
        ("k_diffusion", (is_k_diffusion_available, K_DIFFUSION_IMPORT_ERROR)),
        ("note_seq", (is_note_seq_available, NOTE_SEQ_IMPORT_ERROR)),
        ("wandb", (is_wandb_available, WANDB_IMPORT_ERROR)),
        ("tensorboard", (is_tensorboard_available, TENSORBOARD_IMPORT_ERROR)),
        ("compel", (is_compel_available, COMPEL_IMPORT_ERROR)),
        ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)),
        ("torchsde", (is_torchsde_available, TORCHSDE_IMPORT_ERROR)),
        ("invisible_watermark", (is_invisible_watermark_available, INVISIBLE_WATERMARK_IMPORT_ERROR)),
        ("peft", (is_peft_available, PEFT_IMPORT_ERROR)),
        ("safetensors", (is_safetensors_available, SAFETENSORS_IMPORT_ERROR)),
        ("bitsandbytes", (is_bitsandbytes_available, BITSANDBYTES_IMPORT_ERROR)),
        ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)),
        ("imageio", (is_imageio_available, IMAGEIO_IMPORT_ERROR)),
        ("gguf", (is_gguf_available, GGUF_IMPORT_ERROR)),
        ("torchao", (is_torchao_available, TORCHAO_IMPORT_ERROR)),
        ("quanto", (is_optimum_quanto_available, QUANTO_IMPORT_ERROR)),
    ]
)


def requires_backends(obj, backends):
    if not isinstance(backends, (list, tuple)):
        backends = [backends]

    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    checks = (BACKENDS_MAPPING[backend] for backend in backends)
    failed = [msg.format(name) for available, msg in checks if not available()]
    if failed:
        raise ImportError("".join(failed))

    if name in [
        "VersatileDiffusionTextToImagePipeline",
        "VersatileDiffusionPipeline",
        "VersatileDiffusionDualGuidedPipeline",
        "StableDiffusionImageVariationPipeline",
        "UnCLIPPipeline",
    ] and is_transformers_version("<", "4.25.0"):
        raise ImportError(
            f"You need to install `transformers>=4.25` in order to use {name}: \n```\n pip install"
            " --upgrade transformers \n```"
        )

    if name in ["StableDiffusionDepth2ImgPipeline", "StableDiffusionPix2PixZeroPipeline"] and is_transformers_version(
        "<", "4.26.0"
    ):
        raise ImportError(
            f"You need to install `transformers>=4.26` in order to use {name}: \n```\n pip install"
            " --upgrade transformers \n```"
        )
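
# Example (illustrative): guarding an entry point on optional backends. The function
# name below is hypothetical; `requires_backends` raises an ImportError built from the
# matching messages in BACKENDS_MAPPING when any listed backend is missing.
#
#   def load_audio_pipeline(path):
#       requires_backends(load_audio_pipeline, ["librosa", "scipy"])
#       ...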


class DummyObject(type):
    """
    Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by
    `requires_backend` each time a user tries to access any method of that class.
    """

    def __getattr__(cls, key):
        if key.startswith("_") and key not in ["_load_connected_pipes", "_is_onnx"]:
            return super().__getattr__(cls, key)
        requires_backends(cls, cls._backends)
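
# Example (illustrative): the dummy-object pattern used by the generated
# `utils/dummy_*_objects.py` files. The class name below is hypothetical; touching the
# class raises the backend ImportError instead of failing at import time.
#
#   class MyTorchOnlyPipeline(metaclass=DummyObject):
#       _backends = ["torch"]
#
#       def __init__(self, *args, **kwargs):
#           requires_backends(self, ["torch"])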


# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L319
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """
    Compares a library version to some requirement using a given operation.

    Args:
        library_or_version (`str` or `packaging.version.Version`):
            A library name or a version to check.
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`.
        requirement_version (`str`):
            The version to compare the library version against
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib_metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
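
# Example (illustrative): comparing either an installed distribution or an explicit
# version object against a requirement.
#
#   compare_versions("huggingface_hub", ">=", "0.20.0")   # looks up the installed version
#   compare_versions(parse("2.1.0"), "<", "2.2.0")        # -> True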


# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L338
def is_torch_version(operation: str, version: str):
    """
    Compares the current PyTorch version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A string version of PyTorch
    """
    return compare_versions(parse(_torch_version), operation, version)
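
# Example (illustrative): a typical guard, assuming PyTorch is installed.
#
#   if is_torch_available() and is_torch_version(">=", "2.0"):
#       ...  # take a torch 2.x-only code path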


def is_torch_xla_version(operation: str, version: str):
    """
    Compares the current torch_xla version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A string version of torch_xla
    """
    if not _torch_xla_available:
        return False
    return compare_versions(parse(_torch_xla_version), operation, version)


def is_transformers_version(operation: str, version: str):
    """
    Compares the current Transformers version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _transformers_available:
        return False
    return compare_versions(parse(_transformers_version), operation, version)


def is_hf_hub_version(operation: str, version: str):
    """
    Compares the current Hugging Face Hub version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _hf_hub_available:
        return False
    return compare_versions(parse(_hf_hub_version), operation, version)


def is_accelerate_version(operation: str, version: str):
    """
    Compares the current Accelerate version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _accelerate_available:
        return False
    return compare_versions(parse(_accelerate_version), operation, version)


def is_peft_version(operation: str, version: str):
    """
    Compares the current PEFT version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _peft_available:
        return False
    return compare_versions(parse(_peft_version), operation, version)


def is_bitsandbytes_version(operation: str, version: str):
    """
    Compares the current bitsandbytes version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _bitsandbytes_available:
        return False
    return compare_versions(parse(_bitsandbytes_version), operation, version)


def is_gguf_version(operation: str, version: str):
    """
    Compares the current gguf version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _gguf_available:
        return False
    return compare_versions(parse(_gguf_version), operation, version)


def is_torchao_version(operation: str, version: str):
    """
    Compares the current torchao version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _torchao_available:
        return False
    return compare_versions(parse(_torchao_version), operation, version)


def is_k_diffusion_version(operation: str, version: str):
    """
    Compares the current k-diffusion version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _k_diffusion_available:
        return False
    return compare_versions(parse(_k_diffusion_version), operation, version)


def is_optimum_quanto_version(operation: str, version: str):
    """
    Compares the current optimum-quanto version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _optimum_quanto_available:
        return False
    return compare_versions(parse(_optimum_quanto_version), operation, version)


def get_objects_from_module(module):
    """
    Returns a dict of object names and values in a module, while skipping private/internal objects

    Args:
        module (ModuleType):
            Module to extract the objects from.

    Returns:
        dict: Dictionary of object names and corresponding values
    """

    objects = {}
    for name in dir(module):
        if name.startswith("_"):
            continue
        objects[name] = getattr(module, name)

    return objects
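
# Example (illustrative): collecting placeholder objects from a dummy module so a lazy
# package __init__ can re-export them. The module name below is hypothetical.
#
#   from .utils import dummy_torch_objects  # hypothetical dummy-objects module
#   _dummy_objects = get_objects_from_module(dummy_torch_objects)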


class OptionalDependencyNotAvailable(BaseException):
    """
    An error indicating that an optional dependency of Diffusers was not found in the environment.
    """


class _LazyModule(ModuleType):
    """
    Module class that surfaces all objects but only performs associated imports when the objects are requested.
    """

    # Very heavily inspired by optuna.integration._IntegrationModule
    # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py
    def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None):
        super().__init__(name)
        self._modules = set(import_structure.keys())
        self._class_to_module = {}
        for key, values in import_structure.items():
            for value in values:
                self._class_to_module[value] = key
        # Needed for autocompletion in an IDE
        self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values()))
        self.__file__ = module_file
        self.__spec__ = module_spec
        self.__path__ = [os.path.dirname(module_file)]
        self._objects = {} if extra_objects is None else extra_objects
        self._name = name
        self._import_structure = import_structure

    # Needed for autocompletion in an IDE
    def __dir__(self):
        result = super().__dir__()
        # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether
        # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir.
        for attr in self.__all__:
            if attr not in result:
                result.append(attr)
        return result

    def __getattr__(self, name: str) -> Any:
        if name in self._objects:
            return self._objects[name]
        if name in self._modules:
            value = self._get_module(name)
        elif name in self._class_to_module.keys():
            module = self._get_module(self._class_to_module[name])
            value = getattr(module, name)
        else:
            raise AttributeError(f"module {self.__name__} has no attribute {name}")

        setattr(self, name, value)
        return value

    def _get_module(self, module_name: str):
        try:
            return importlib.import_module("." + module_name, self.__name__)
        except Exception as e:
            raise RuntimeError(
                f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its"
                f" traceback):\n{e}"
            ) from e

    def __reduce__(self):
        return (self.__class__, (self._name, self.__file__, self._import_structure))
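

# Example (illustrative): how a package `__init__.py` typically installs a _LazyModule so
# that submodules and classes are only imported on first attribute access. The import
# structure below is a made-up minimal example, not the real one.
#
#   import sys
#
#   _import_structure = {"schedulers": ["DDIMScheduler"], "models": ["UNet2DModel"]}
#   sys.modules[__name__] = _LazyModule(
#       __name__,
#       globals()["__file__"],
#       _import_structure,
#       module_spec=__spec__,
#   )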