"git@developer.sourcefind.cn:change/sglang.git" did not exist on "f8548295d63d56b79599d900c61d6539bb6cfc74"
import_utils.py 33.6 KB
Newer Older
Aryan's avatar
Aryan committed
1
# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Import utilities: Utilities related to imports and our lazy inits.
"""

import importlib.util
import inspect
import operator as op
import os
import sys
from collections import OrderedDict, defaultdict
from functools import lru_cache as cache
from itertools import chain
from types import ModuleType
from typing import Any, Tuple, Union

from huggingface_hub.utils import is_jinja_available  # noqa: F401
from packaging.version import Version, parse

from . import logging


# The package importlib_metadata is in a different place, depending on the python version.
if sys.version_info < (3, 8):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata
try:
    _package_map = importlib_metadata.packages_distributions()  # load-once to avoid expensive calls
except Exception:
    _package_map = None

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

ENV_VARS_TRUE_VALUES = {"1", "ON", "YES", "TRUE"}
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({"AUTO"})

USE_TF = os.environ.get("USE_TF", "AUTO").upper()
USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper()
USE_JAX = os.environ.get("USE_FLAX", "AUTO").upper()
USE_SAFETENSORS = os.environ.get("USE_SAFETENSORS", "AUTO").upper()

DIFFUSERS_SLOW_IMPORT = os.environ.get("DIFFUSERS_SLOW_IMPORT", "FALSE").upper()
DIFFUSERS_SLOW_IMPORT = DIFFUSERS_SLOW_IMPORT in ENV_VARS_TRUE_VALUES

STR_OPERATION_TO_FUNC = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}

_is_google_colab = "google.colab" in sys.modules or any(k.startswith("COLAB_") for k in os.environ)


def _is_package_available(pkg_name: str, get_dist_name: bool = False) -> Tuple[bool, str]:
    global _package_map
    pkg_exists = importlib.util.find_spec(pkg_name) is not None
    pkg_version = "N/A"

    if pkg_exists:
        if _package_map is None:
            _package_map = defaultdict(list)
            try:
                # Fallback for Python < 3.10
                for dist in importlib_metadata.distributions():
                    _top_level_declared = (dist.read_text("top_level.txt") or "").split()
                    # Infer top-level package names from file structure
                    _inferred_opt_names = {
                        f.parts[0] if len(f.parts) > 1 else inspect.getmodulename(f) for f in (dist.files or [])
                    } - {None}
                    _top_level_inferred = filter(lambda name: "." not in name, _inferred_opt_names)
                    for pkg in _top_level_declared or _top_level_inferred:
                        _package_map[pkg].append(dist.metadata["Name"])
            except Exception as _:
                pass
        try:
            if get_dist_name and pkg_name in _package_map and _package_map[pkg_name]:
                if len(_package_map[pkg_name]) > 1:
                    logger.warning(
                        f"Multiple distributions found for package {pkg_name}. Picked distribution: {_package_map[pkg_name][0]}"
                    )
                pkg_name = _package_map[pkg_name][0]
            pkg_version = importlib_metadata.version(pkg_name)
            logger.debug(f"Successfully imported {pkg_name} version {pkg_version}")
        except (ImportError, importlib_metadata.PackageNotFoundError):
            pkg_exists = False

    return pkg_exists, pkg_version
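
# Illustrative usage of the helper above (the package name is only an example, not a
# dependency this module actually checks):
#
#     _pillow_available, _pillow_version = _is_package_available("PIL", get_dist_name=True)
#
# With `get_dist_name=True`, the importable module name is mapped to its distribution name
# before the version lookup.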


if USE_TORCH in ENV_VARS_TRUE_AND_AUTO_VALUES and USE_TF not in ENV_VARS_TRUE_VALUES:
    _torch_available, _torch_version = _is_package_available("torch")

else:
    logger.info("Disabling PyTorch because USE_TORCH is set")
    _torch_available = False
    _torch_version = "N/A"

_jax_version = "N/A"
_flax_version = "N/A"
if USE_JAX in ENV_VARS_TRUE_AND_AUTO_VALUES:
    _flax_available = importlib.util.find_spec("jax") is not None and importlib.util.find_spec("flax") is not None
    if _flax_available:
        try:
            _jax_version = importlib_metadata.version("jax")
            _flax_version = importlib_metadata.version("flax")
            logger.info(f"JAX version {_jax_version}, Flax version {_flax_version} available.")
        except importlib_metadata.PackageNotFoundError:
            _flax_available = False
else:
    _flax_available = False

if USE_SAFETENSORS in ENV_VARS_TRUE_AND_AUTO_VALUES:
    _safetensors_available, _safetensors_version = _is_package_available("safetensors")

else:
    logger.info("Disabling Safetensors because USE_SAFETENSORS is set")
    _safetensors_available = False

_onnxruntime_version = "N/A"
_onnx_available = importlib.util.find_spec("onnxruntime") is not None
if _onnx_available:
    candidates = (
        "onnxruntime",
        "onnxruntime-cann",
        "onnxruntime-directml",
        "ort_nightly_directml",
        "onnxruntime-gpu",
        "ort_nightly_gpu",
        "onnxruntime-migraphx",
        "onnxruntime-openvino",
        "onnxruntime-qnn",
        "onnxruntime-rocm",
        "onnxruntime-training",
        "onnxruntime-vitisai",
    )
    _onnxruntime_version = None
    # For the metadata, we have to look for both onnxruntime and onnxruntime-x
    for pkg in candidates:
        try:
            _onnxruntime_version = importlib_metadata.version(pkg)
            break
        except importlib_metadata.PackageNotFoundError:
            pass
    _onnx_available = _onnxruntime_version is not None
    if _onnx_available:
        logger.debug(f"Successfully imported onnxruntime version {_onnxruntime_version}")

# (sayakpaul): importlib.util.find_spec("opencv-python") returns None even when it's installed.
# _opencv_available = importlib.util.find_spec("opencv-python") is not None
try:
    candidates = (
        "opencv-python",
        "opencv-contrib-python",
        "opencv-python-headless",
        "opencv-contrib-python-headless",
    )
    _opencv_version = None
    for pkg in candidates:
        try:
            _opencv_version = importlib_metadata.version(pkg)
            break
        except importlib_metadata.PackageNotFoundError:
            pass
    _opencv_available = _opencv_version is not None
    if _opencv_available:
        logger.debug(f"Successfully imported cv2 version {_opencv_version}")
except importlib_metadata.PackageNotFoundError:
    _opencv_available = False

_bs4_available = importlib.util.find_spec("bs4") is not None
try:
    # importlib metadata under different name
    _bs4_version = importlib_metadata.version("beautifulsoup4")
    logger.debug(f"Successfully imported beautifulsoup4 version {_bs4_version}")
except importlib_metadata.PackageNotFoundError:
    _bs4_available = False

_invisible_watermark_available = importlib.util.find_spec("imwatermark") is not None
try:
    _invisible_watermark_version = importlib_metadata.version("invisible-watermark")
    logger.debug(f"Successfully imported invisible-watermark version {_invisible_watermark_version}")
except importlib_metadata.PackageNotFoundError:
    _invisible_watermark_available = False

_torch_xla_available, _torch_xla_version = _is_package_available("torch_xla")
_torch_npu_available, _torch_npu_version = _is_package_available("torch_npu")
_transformers_available, _transformers_version = _is_package_available("transformers")
_hf_hub_available, _hf_hub_version = _is_package_available("huggingface_hub")
_kernels_available, _kernels_version = _is_package_available("kernels")
_inflect_available, _inflect_version = _is_package_available("inflect")
_unidecode_available, _unidecode_version = _is_package_available("unidecode")
_k_diffusion_available, _k_diffusion_version = _is_package_available("k_diffusion")
_note_seq_available, _note_seq_version = _is_package_available("note_seq")
_wandb_available, _wandb_version = _is_package_available("wandb")
_tensorboard_available, _tensorboard_version = _is_package_available("tensorboard")
_compel_available, _compel_version = _is_package_available("compel")
_sentencepiece_available, _sentencepiece_version = _is_package_available("sentencepiece")
_torchsde_available, _torchsde_version = _is_package_available("torchsde")
_peft_available, _peft_version = _is_package_available("peft")
_torchvision_available, _torchvision_version = _is_package_available("torchvision")
_matplotlib_available, _matplotlib_version = _is_package_available("matplotlib")
_timm_available, _timm_version = _is_package_available("timm")
_bitsandbytes_available, _bitsandbytes_version = _is_package_available("bitsandbytes")
_imageio_available, _imageio_version = _is_package_available("imageio")
_ftfy_available, _ftfy_version = _is_package_available("ftfy")
_scipy_available, _scipy_version = _is_package_available("scipy")
_librosa_available, _librosa_version = _is_package_available("librosa")
_accelerate_available, _accelerate_version = _is_package_available("accelerate")
_xformers_available, _xformers_version = _is_package_available("xformers")
_gguf_available, _gguf_version = _is_package_available("gguf")
_torchao_available, _torchao_version = _is_package_available("torchao")
_optimum_quanto_available, _optimum_quanto_version = _is_package_available("optimum", get_dist_name=True)
_pytorch_retinaface_available, _pytorch_retinaface_version = _is_package_available("pytorch_retinaface")
_better_profanity_available, _better_profanity_version = _is_package_available("better_profanity")
_nltk_available, _nltk_version = _is_package_available("nltk")
_cosmos_guardrail_available, _cosmos_guardrail_version = _is_package_available("cosmos_guardrail")
_sageattention_available, _sageattention_version = _is_package_available("sageattention")
_flash_attn_available, _flash_attn_version = _is_package_available("flash_attn")
_flash_attn_3_available, _flash_attn_3_version = _is_package_available("flash_attn_3")
_kornia_available, _kornia_version = _is_package_available("kornia")
_nvidia_modelopt_available, _nvidia_modelopt_version = _is_package_available("modelopt", get_dist_name=True)


def is_torch_available():
    return _torch_available


def is_torch_xla_available():
    return _torch_xla_available


def is_torch_npu_available():
    return _torch_npu_available


def is_flax_available():
    return _flax_available


def is_transformers_available():
    return _transformers_available


def is_inflect_available():
    return _inflect_available


def is_unidecode_available():
    return _unidecode_available


def is_onnx_available():
    return _onnx_available


def is_opencv_available():
    return _opencv_available


def is_scipy_available():
    return _scipy_available


def is_librosa_available():
    return _librosa_available


def is_xformers_available():
    return _xformers_available


def is_accelerate_available():
    return _accelerate_available


def is_kernels_available():
    return _kernels_available


def is_k_diffusion_available():
    return _k_diffusion_available


def is_note_seq_available():
    return _note_seq_available


def is_wandb_available():
    return _wandb_available


def is_tensorboard_available():
    return _tensorboard_available


def is_compel_available():
    return _compel_available


def is_ftfy_available():
    return _ftfy_available


def is_bs4_available():
    return _bs4_available


def is_torchsde_available():
    return _torchsde_available


def is_invisible_watermark_available():
    return _invisible_watermark_available


def is_peft_available():
    return _peft_available


def is_torchvision_available():
    return _torchvision_available


def is_matplotlib_available():
    return _matplotlib_available


def is_safetensors_available():
    return _safetensors_available


def is_bitsandbytes_available():
    return _bitsandbytes_available


def is_google_colab():
    return _is_google_colab


def is_sentencepiece_available():
    return _sentencepiece_available


def is_imageio_available():
    return _imageio_available


def is_gguf_available():
    return _gguf_available


def is_torchao_available():
    return _torchao_available


def is_optimum_quanto_available():
    return _optimum_quanto_available


def is_nvidia_modelopt_available():
    return _nvidia_modelopt_available


def is_timm_available():
    return _timm_available


def is_pytorch_retinaface_available():
    return _pytorch_retinaface_available


def is_better_profanity_available():
    return _better_profanity_available


def is_nltk_available():
    return _nltk_available


def is_cosmos_guardrail_available():
    return _cosmos_guardrail_available


def is_hpu_available():
    return all(importlib.util.find_spec(lib) for lib in ("habana_frameworks", "habana_frameworks.torch"))


def is_sageattention_available():
    return _sageattention_available


def is_flash_attn_available():
    return _flash_attn_available


def is_flash_attn_3_available():
    return _flash_attn_3_available


def is_kornia_available():
    return _kornia_available


# docstyle-ignore
FLAX_IMPORT_ERROR = """
{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the
installation page: https://github.com/google/flax and follow the ones that match your environment.
"""

# docstyle-ignore
INFLECT_IMPORT_ERROR = """
{0} requires the inflect library but it was not found in your environment. You can install it with pip: `pip install
inflect`
"""

# docstyle-ignore
PYTORCH_IMPORT_ERROR = """
{0} requires the PyTorch library but it was not found in your environment. Checkout the instructions on the
installation page: https://pytorch.org/get-started/locally/ and follow the ones that match your environment.
"""

# docstyle-ignore
ONNX_IMPORT_ERROR = """
{0} requires the onnxruntime library but it was not found in your environment. You can install it with pip: `pip
install onnxruntime`
"""

# docstyle-ignore
OPENCV_IMPORT_ERROR = """
{0} requires the OpenCV library but it was not found in your environment. You can install it with pip: `pip
install opencv-python`
"""

# docstyle-ignore
SCIPY_IMPORT_ERROR = """
{0} requires the scipy library but it was not found in your environment. You can install it with pip: `pip install
scipy`
"""

# docstyle-ignore
LIBROSA_IMPORT_ERROR = """
{0} requires the librosa library but it was not found in your environment.  Checkout the instructions on the
installation page: https://librosa.org/doc/latest/install.html and follow the ones that match your environment.
"""

# docstyle-ignore
TRANSFORMERS_IMPORT_ERROR = """
{0} requires the transformers library but it was not found in your environment. You can install it with pip: `pip
install transformers`
"""

# docstyle-ignore
UNIDECODE_IMPORT_ERROR = """
{0} requires the unidecode library but it was not found in your environment. You can install it with pip: `pip install
Unidecode`
"""

# docstyle-ignore
K_DIFFUSION_IMPORT_ERROR = """
{0} requires the k-diffusion library but it was not found in your environment. You can install it with pip: `pip
install k-diffusion`
"""

# docstyle-ignore
NOTE_SEQ_IMPORT_ERROR = """
{0} requires the note-seq library but it was not found in your environment. You can install it with pip: `pip
install note-seq`
"""

# docstyle-ignore
WANDB_IMPORT_ERROR = """
{0} requires the wandb library but it was not found in your environment. You can install it with pip: `pip
install wandb`
"""

# docstyle-ignore
TENSORBOARD_IMPORT_ERROR = """
{0} requires the tensorboard library but it was not found in your environment. You can install it with pip: `pip
install tensorboard`
"""


# docstyle-ignore
COMPEL_IMPORT_ERROR = """
{0} requires the compel library but it was not found in your environment. You can install it with pip: `pip install compel`
"""

# docstyle-ignore
BS4_IMPORT_ERROR = """
{0} requires the Beautiful Soup library but it was not found in your environment. You can install it with pip:
`pip install beautifulsoup4`. Please note that you may need to restart your runtime after installation.
"""

# docstyle-ignore
FTFY_IMPORT_ERROR = """
{0} requires the ftfy library but it was not found in your environment. Checkout the instructions on the
installation section: https://github.com/rspeer/python-ftfy/tree/master#installing and follow the ones
that match your environment. Please note that you may need to restart your runtime after installation.
"""

# docstyle-ignore
TORCHSDE_IMPORT_ERROR = """
{0} requires the torchsde library but it was not found in your environment. You can install it with pip: `pip install torchsde`
"""

# docstyle-ignore
INVISIBLE_WATERMARK_IMPORT_ERROR = """
{0} requires the invisible-watermark library but it was not found in your environment. You can install it with pip: `pip install invisible-watermark>=0.2.0`
"""

# docstyle-ignore
PEFT_IMPORT_ERROR = """
{0} requires the peft library but it was not found in your environment. You can install it with pip: `pip install peft`
"""

# docstyle-ignore
SAFETENSORS_IMPORT_ERROR = """
{0} requires the safetensors library but it was not found in your environment. You can install it with pip: `pip install safetensors`
"""

# docstyle-ignore
SENTENCEPIECE_IMPORT_ERROR = """
{0} requires the sentencepiece library but it was not found in your environment. You can install it with pip: `pip install sentencepiece`
"""


# docstyle-ignore
BITSANDBYTES_IMPORT_ERROR = """
{0} requires the bitsandbytes library but it was not found in your environment. You can install it with pip: `pip install bitsandbytes`
"""

# docstyle-ignore
IMAGEIO_IMPORT_ERROR = """
{0} requires the imageio library and ffmpeg but it was not found in your environment. You can install it with pip: `pip install imageio imageio-ffmpeg`
"""

# docstyle-ignore
GGUF_IMPORT_ERROR = """
{0} requires the gguf library but it was not found in your environment. You can install it with pip: `pip install gguf`
"""

TORCHAO_IMPORT_ERROR = """
{0} requires the torchao library but it was not found in your environment. You can install it with pip: `pip install
torchao`
"""

QUANTO_IMPORT_ERROR = """
{0} requires the optimum-quanto library but it was not found in your environment. You can install it with pip: `pip
install optimum-quanto`
"""

# docstyle-ignore
PYTORCH_RETINAFACE_IMPORT_ERROR = """
{0} requires the pytorch_retinaface library but it was not found in your environment. You can install it with pip: `pip install pytorch_retinaface`
"""

# docstyle-ignore
BETTER_PROFANITY_IMPORT_ERROR = """
{0} requires the better_profanity library but it was not found in your environment. You can install it with pip: `pip install better_profanity`
"""

# docstyle-ignore
NLTK_IMPORT_ERROR = """
{0} requires the nltk library but it was not found in your environment. You can install it with pip: `pip install nltk`
"""


BACKENDS_MAPPING = OrderedDict(
    [
        ("bs4", (is_bs4_available, BS4_IMPORT_ERROR)),
        ("flax", (is_flax_available, FLAX_IMPORT_ERROR)),
        ("inflect", (is_inflect_available, INFLECT_IMPORT_ERROR)),
        ("onnx", (is_onnx_available, ONNX_IMPORT_ERROR)),
        ("opencv", (is_opencv_available, OPENCV_IMPORT_ERROR)),
        ("scipy", (is_scipy_available, SCIPY_IMPORT_ERROR)),
        ("torch", (is_torch_available, PYTORCH_IMPORT_ERROR)),
        ("transformers", (is_transformers_available, TRANSFORMERS_IMPORT_ERROR)),
        ("unidecode", (is_unidecode_available, UNIDECODE_IMPORT_ERROR)),
        ("librosa", (is_librosa_available, LIBROSA_IMPORT_ERROR)),
        ("k_diffusion", (is_k_diffusion_available, K_DIFFUSION_IMPORT_ERROR)),
        ("note_seq", (is_note_seq_available, NOTE_SEQ_IMPORT_ERROR)),
        ("wandb", (is_wandb_available, WANDB_IMPORT_ERROR)),
        ("tensorboard", (is_tensorboard_available, TENSORBOARD_IMPORT_ERROR)),
        ("compel", (is_compel_available, COMPEL_IMPORT_ERROR)),
        ("ftfy", (is_ftfy_available, FTFY_IMPORT_ERROR)),
        ("torchsde", (is_torchsde_available, TORCHSDE_IMPORT_ERROR)),
        ("invisible_watermark", (is_invisible_watermark_available, INVISIBLE_WATERMARK_IMPORT_ERROR)),
        ("peft", (is_peft_available, PEFT_IMPORT_ERROR)),
        ("safetensors", (is_safetensors_available, SAFETENSORS_IMPORT_ERROR)),
        ("bitsandbytes", (is_bitsandbytes_available, BITSANDBYTES_IMPORT_ERROR)),
        ("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)),
        ("imageio", (is_imageio_available, IMAGEIO_IMPORT_ERROR)),
        ("gguf", (is_gguf_available, GGUF_IMPORT_ERROR)),
        ("torchao", (is_torchao_available, TORCHAO_IMPORT_ERROR)),
        ("quanto", (is_optimum_quanto_available, QUANTO_IMPORT_ERROR)),
        ("pytorch_retinaface", (is_pytorch_retinaface_available, PYTORCH_RETINAFACE_IMPORT_ERROR)),
        ("better_profanity", (is_better_profanity_available, BETTER_PROFANITY_IMPORT_ERROR)),
        ("nltk", (is_nltk_available, NLTK_IMPORT_ERROR)),
    ]
)


def requires_backends(obj, backends):
    if not isinstance(backends, (list, tuple)):
        backends = [backends]

    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    checks = (BACKENDS_MAPPING[backend] for backend in backends)
    failed = [msg.format(name) for available, msg in checks if not available()]
    if failed:
        raise ImportError("".join(failed))

    if name in [
        "VersatileDiffusionTextToImagePipeline",
        "VersatileDiffusionPipeline",
        "VersatileDiffusionDualGuidedPipeline",
        "StableDiffusionImageVariationPipeline",
        "UnCLIPPipeline",
    ] and is_transformers_version("<", "4.25.0"):
        raise ImportError(
            f"You need to install `transformers>=4.25` in order to use {name}: \n```\n pip install"
            " --upgrade transformers \n```"
        )

    if name in ["StableDiffusionDepth2ImgPipeline", "StableDiffusionPix2PixZeroPipeline"] and is_transformers_version(
        "<", "4.26.0"
    ):
        raise ImportError(
            f"You need to install `transformers>=4.26` in order to use {name}: \n```\n pip install"
            " --upgrade transformers \n```"
        )
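
# A minimal usage sketch (the class name is hypothetical) of how requires_backends is
# typically called to guard a component that needs optional backends:
#
#     class HypotheticalPipeline:
#         def __init__(self):
#             requires_backends(self, ["torch", "transformers"])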


class DummyObject(type):
    """
    Metaclass for the dummy objects. Any class inheriting from it will return the ImportError generated by
    `requires_backend` each time a user tries to access any method of that class.
    """

    def __getattr__(cls, key):
        if key.startswith("_") and key not in ["_load_connected_pipes", "_is_onnx"]:
            return super().__getattr__(cls, key)
        requires_backends(cls, cls._backends)
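
# Illustrative sketch (assumed names): dummy placeholder classes declare the backends they
# need, and any attribute access then raises the combined ImportError from requires_backends.
#
#     class HypotheticalDummyPipeline(metaclass=DummyObject):
#         _backends = ["torch", "transformers"]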


# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L319
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """
    Compares a library version to some requirement using a given operation.

    Args:
        library_or_version (`str` or `packaging.version.Version`):
            A library name or a version to check.
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`.
        requirement_version (`str`):
            The version to compare the library version against
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib_metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
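
# Illustrative examples of the comparison helper above (the version strings are arbitrary):
#
#     compare_versions(parse("2.1.0"), ">=", "2.0.0")  # compares two explicit versions -> True
#     compare_versions("packaging", ">=", "21.0")      # looks up the installed "packaging" version first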


# This function was copied from: https://github.com/huggingface/accelerate/blob/874c4967d94badd24f893064cc3bef45f57cadf7/src/accelerate/utils/versions.py#L338
@cache
def is_torch_version(operation: str, version: str):
    """
    Compares the current PyTorch version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A string version of PyTorch
    """
    return compare_versions(parse(_torch_version), operation, version)
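
# Illustrative usage (the version literal is only an example):
#
#     if is_torch_version(">=", "2.0.0"):
#         ...  # take a code path that relies on torch >= 2.0 behavior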


@cache
def is_torch_xla_version(operation: str, version: str):
    """
    Compares the current torch_xla version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A string version of torch_xla
    """
    if not _torch_xla_available:
        return False
    return compare_versions(parse(_torch_xla_version), operation, version)


@cache
def is_transformers_version(operation: str, version: str):
    """
    Compares the current Transformers version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _transformers_available:
        return False
    return compare_versions(parse(_transformers_version), operation, version)


@cache
def is_hf_hub_version(operation: str, version: str):
    """
    Compares the current Hugging Face Hub version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _hf_hub_available:
        return False
    return compare_versions(parse(_hf_hub_version), operation, version)


@cache
def is_accelerate_version(operation: str, version: str):
    """
    Compares the current Accelerate version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _accelerate_available:
        return False
    return compare_versions(parse(_accelerate_version), operation, version)


@cache
def is_peft_version(operation: str, version: str):
    """
    Compares the current PEFT version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _peft_available:
        return False
    return compare_versions(parse(_peft_version), operation, version)


@cache
def is_bitsandbytes_version(operation: str, version: str):
    """
    Compares the current bitsandbytes version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _bitsandbytes_available:
        return False
    return compare_versions(parse(_bitsandbytes_version), operation, version)


@cache
def is_gguf_version(operation: str, version: str):
    """
    Compares the current gguf version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _gguf_available:
        return False
    return compare_versions(parse(_gguf_version), operation, version)


@cache
def is_torchao_version(operation: str, version: str):
    """
    Compares the current torchao version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _torchao_available:
        return False
    return compare_versions(parse(_torchao_version), operation, version)


@cache
def is_k_diffusion_version(operation: str, version: str):
    """
    Compares the current k-diffusion version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _k_diffusion_available:
        return False
    return compare_versions(parse(_k_diffusion_version), operation, version)


@cache
def is_optimum_quanto_version(operation: str, version: str):
    """
    Compares the current optimum-quanto version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _optimum_quanto_available:
        return False
    return compare_versions(parse(_optimum_quanto_version), operation, version)


@cache
def is_nvidia_modelopt_version(operation: str, version: str):
    """
    Compares the current Nvidia ModelOpt version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _nvidia_modelopt_available:
        return False
    return compare_versions(parse(_nvidia_modelopt_version), operation, version)


@cache
def is_xformers_version(operation: str, version: str):
    """
    Compares the current xformers version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _xformers_available:
        return False
    return compare_versions(parse(_xformers_version), operation, version)


@cache
def is_sageattention_version(operation: str, version: str):
    """
    Compares the current sageattention version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _sageattention_available:
        return False
    return compare_versions(parse(_sageattention_version), operation, version)


@cache
def is_flash_attn_version(operation: str, version: str):
    """
    Compares the current flash-attention version to a given reference with an operation.

    Args:
        operation (`str`):
            A string representation of an operator, such as `">"` or `"<="`
        version (`str`):
            A version string
    """
    if not _flash_attn_available:
        return False
    return compare_versions(parse(_flash_attn_version), operation, version)


def get_objects_from_module(module):
    """
    Returns a dict of object names and values in a module, while skipping private/internal objects

    Args:
        module (ModuleType):
            Module to extract the objects from.

    Returns:
        dict: Dictionary of object names and corresponding values
    """

    objects = {}
    for name in dir(module):
        if name.startswith("_"):
            continue
        objects[name] = getattr(module, name)

    return objects
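
# Illustrative sketch (the dummy module name is hypothetical): collect the public objects of a
# dummy-objects module so they can be re-exported when a backend is missing.
#
#     from . import dummy_pt_objects  # hypothetical module of DummyObject placeholders
#     _dummy_objects = get_objects_from_module(dummy_pt_objects)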


class OptionalDependencyNotAvailable(BaseException):
    """
    An error indicating that an optional dependency of Diffusers was not found in the environment.
    """


class _LazyModule(ModuleType):
    """
    Module class that surfaces all objects but only performs associated imports when the objects are requested.
    """

    # Very heavily inspired by optuna.integration._IntegrationModule
    # https://github.com/optuna/optuna/blob/master/optuna/integration/__init__.py
    def __init__(self, name, module_file, import_structure, module_spec=None, extra_objects=None):
        super().__init__(name)
        self._modules = set(import_structure.keys())
        self._class_to_module = {}
        for key, values in import_structure.items():
            for value in values:
                self._class_to_module[value] = key
        # Needed for autocompletion in an IDE
        self.__all__ = list(import_structure.keys()) + list(chain(*import_structure.values()))
        self.__file__ = module_file
        self.__spec__ = module_spec
        self.__path__ = [os.path.dirname(module_file)]
        self._objects = {} if extra_objects is None else extra_objects
        self._name = name
        self._import_structure = import_structure

    # Needed for autocompletion in an IDE
    def __dir__(self):
        result = super().__dir__()
        # The elements of self.__all__ that are submodules may or may not be in the dir already, depending on whether
        # they have been accessed or not. So we only add the elements of self.__all__ that are not already in the dir.
        for attr in self.__all__:
            if attr not in result:
                result.append(attr)
        return result

    def __getattr__(self, name: str) -> Any:
        if name in self._objects:
            return self._objects[name]
        if name in self._modules:
            value = self._get_module(name)
        elif name in self._class_to_module.keys():
            module = self._get_module(self._class_to_module[name])
            value = getattr(module, name)
        else:
            raise AttributeError(f"module {self.__name__} has no attribute {name}")

        setattr(self, name, value)
        return value

    def _get_module(self, module_name: str):
        try:
            return importlib.import_module("." + module_name, self.__name__)
        except Exception as e:
            raise RuntimeError(
                f"Failed to import {self.__name__}.{module_name} because of the following error (look up to see its"
                f" traceback):\n{e}"
            ) from e

    def __reduce__(self):
        return (self.__class__, (self._name, self.__file__, self._import_structure))
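
# Minimal sketch of how a package __init__ typically wires up _LazyModule (the import
# structure below is illustrative, not the real one):
#
#     import sys
#
#     _import_structure = {"models": ["SomeModel"], "pipelines": ["SomePipeline"]}
#     sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)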