"...api/git@developer.sourcefind.cn:renzhc/diffusers_dcu.git" did not exist on "9efe1e52c3f20ed02ad91097b3e471a45e1ba8ed"
Unverified Commit bc3c73ad authored by YiYi Xu's avatar YiYi Xu Committed by GitHub
Browse files

add sentencepiece as a soft dependency (#9065)



* add sentencepiece as a soft dependency for kolors

* up

---------
Co-authored-by: default avatarSayak Paul <spsayakpaul@gmail.com>
parent 5934873b
...@@ -12,6 +12,7 @@ from .utils import ( ...@@ -12,6 +12,7 @@ from .utils import (
is_note_seq_available, is_note_seq_available,
is_onnx_available, is_onnx_available,
is_scipy_available, is_scipy_available,
is_sentencepiece_available,
is_torch_available, is_torch_available,
is_torchsde_available, is_torchsde_available,
is_transformers_available, is_transformers_available,
...@@ -246,8 +247,6 @@ else: ...@@ -246,8 +247,6 @@ else:
"AuraFlowPipeline", "AuraFlowPipeline",
"BlipDiffusionControlNetPipeline", "BlipDiffusionControlNetPipeline",
"BlipDiffusionPipeline", "BlipDiffusionPipeline",
"ChatGLMModel",
"ChatGLMTokenizer",
"CLIPImageProjection", "CLIPImageProjection",
"CycleDiffusionPipeline", "CycleDiffusionPipeline",
"FluxPipeline", "FluxPipeline",
...@@ -386,6 +385,19 @@ except OptionalDependencyNotAvailable: ...@@ -386,6 +385,19 @@ except OptionalDependencyNotAvailable:
else: else:
_import_structure["pipelines"].extend(["StableDiffusionKDiffusionPipeline", "StableDiffusionXLKDiffusionPipeline"]) _import_structure["pipelines"].extend(["StableDiffusionKDiffusionPipeline", "StableDiffusionXLKDiffusionPipeline"])
# Kolors needs torch + transformers + sentencepiece. When any backend is
# missing, expose the matching dummy placeholder objects instead so that
# importing them still works and fails with a helpful message on use.
try:
    _kolors_backends_present = (
        is_torch_available() and is_transformers_available() and is_sentencepiece_available()
    )
    if not _kolors_backends_present:
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils import dummy_torch_and_transformers_and_sentencepiece_objects  # noqa F403

    _import_structure["utils.dummy_torch_and_transformers_and_sentencepiece_objects"] = [
        public_name
        for public_name in dir(dummy_torch_and_transformers_and_sentencepiece_objects)
        if not public_name.startswith("_")
    ]
else:
    _import_structure["pipelines"].extend(["KolorsImg2ImgPipeline", "KolorsPipeline"])
try: try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()): if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable() raise OptionalDependencyNotAvailable()
...@@ -670,8 +682,6 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: ...@@ -670,8 +682,6 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
AudioLDM2UNet2DConditionModel, AudioLDM2UNet2DConditionModel,
AudioLDMPipeline, AudioLDMPipeline,
AuraFlowPipeline, AuraFlowPipeline,
ChatGLMModel,
ChatGLMTokenizer,
CLIPImageProjection, CLIPImageProjection,
CycleDiffusionPipeline, CycleDiffusionPipeline,
FluxPipeline, FluxPipeline,
...@@ -705,8 +715,6 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: ...@@ -705,8 +715,6 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
KandinskyV22Pipeline, KandinskyV22Pipeline,
KandinskyV22PriorEmb2EmbPipeline, KandinskyV22PriorEmb2EmbPipeline,
KandinskyV22PriorPipeline, KandinskyV22PriorPipeline,
KolorsImg2ImgPipeline,
KolorsPipeline,
LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelImg2ImgPipeline,
LatentConsistencyModelPipeline, LatentConsistencyModelPipeline,
LattePipeline, LattePipeline,
...@@ -804,6 +812,13 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: ...@@ -804,6 +812,13 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
else: else:
from .pipelines import StableDiffusionKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline from .pipelines import StableDiffusionKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline
# Slow-import path: import the Kolors pipelines directly when all optional
# backends are installed, falling back to the dummy placeholders otherwise.
try:
    if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from .utils.dummy_torch_and_transformers_and_sentencepiece_objects import *  # noqa F403
else:
    from .pipelines import (
        KolorsImg2ImgPipeline,
        KolorsPipeline,
    )
try: try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()): if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable() raise OptionalDependencyNotAvailable()
......
...@@ -10,6 +10,7 @@ from ..utils import ( ...@@ -10,6 +10,7 @@ from ..utils import (
is_librosa_available, is_librosa_available,
is_note_seq_available, is_note_seq_available,
is_onnx_available, is_onnx_available,
is_sentencepiece_available,
is_torch_available, is_torch_available,
is_torch_npu_available, is_torch_npu_available,
is_transformers_available, is_transformers_available,
...@@ -205,12 +206,6 @@ else: ...@@ -205,12 +206,6 @@ else:
"Kandinsky3Img2ImgPipeline", "Kandinsky3Img2ImgPipeline",
"Kandinsky3Pipeline", "Kandinsky3Pipeline",
] ]
_import_structure["kolors"] = [
"KolorsPipeline",
"KolorsImg2ImgPipeline",
"ChatGLMModel",
"ChatGLMTokenizer",
]
_import_structure["latent_consistency_models"] = [ _import_structure["latent_consistency_models"] = [
"LatentConsistencyModelImg2ImgPipeline", "LatentConsistencyModelImg2ImgPipeline",
"LatentConsistencyModelPipeline", "LatentConsistencyModelPipeline",
...@@ -350,6 +345,22 @@ else: ...@@ -350,6 +345,22 @@ else:
"StableDiffusionKDiffusionPipeline", "StableDiffusionKDiffusionPipeline",
"StableDiffusionXLKDiffusionPipeline", "StableDiffusionXLKDiffusionPipeline",
] ]
# Register the Kolors entries in the lazy-import structure only when the
# torch/transformers/sentencepiece backends are all available; otherwise
# collect the dummy objects so attribute access raises a clear error.
try:
    if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils import dummy_torch_and_transformers_and_sentencepiece_objects

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_sentencepiece_objects))
else:
    _import_structure["kolors"] = ["KolorsPipeline", "KolorsImg2ImgPipeline"]
try: try:
if not is_flax_available(): if not is_flax_available():
raise OptionalDependencyNotAvailable() raise OptionalDependencyNotAvailable()
...@@ -507,12 +518,6 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: ...@@ -507,12 +518,6 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
Kandinsky3Img2ImgPipeline, Kandinsky3Img2ImgPipeline,
Kandinsky3Pipeline, Kandinsky3Pipeline,
) )
from .kolors import (
ChatGLMModel,
ChatGLMTokenizer,
KolorsImg2ImgPipeline,
KolorsPipeline,
)
from .latent_consistency_models import ( from .latent_consistency_models import (
LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelImg2ImgPipeline,
LatentConsistencyModelPipeline, LatentConsistencyModelPipeline,
...@@ -642,6 +647,17 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: ...@@ -642,6 +647,17 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
StableDiffusionXLKDiffusionPipeline, StableDiffusionXLKDiffusionPipeline,
) )
# Slow-import path for the pipelines package: pull in the real Kolors
# pipelines when every optional backend is present, dummies otherwise.
try:
    if not (is_torch_available() and is_transformers_available() and is_sentencepiece_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ..utils.dummy_torch_and_transformers_and_sentencepiece_objects import *
else:
    from .kolors import KolorsImg2ImgPipeline, KolorsPipeline
try: try:
if not is_flax_available(): if not is_flax_available():
raise OptionalDependencyNotAvailable() raise OptionalDependencyNotAvailable()
......
...@@ -18,6 +18,7 @@ from collections import OrderedDict ...@@ -18,6 +18,7 @@ from collections import OrderedDict
from huggingface_hub.utils import validate_hf_hub_args from huggingface_hub.utils import validate_hf_hub_args
from ..configuration_utils import ConfigMixin from ..configuration_utils import ConfigMixin
from ..utils import is_sentencepiece_available
from .aura_flow import AuraFlowPipeline from .aura_flow import AuraFlowPipeline
from .controlnet import ( from .controlnet import (
StableDiffusionControlNetImg2ImgPipeline, StableDiffusionControlNetImg2ImgPipeline,
...@@ -47,7 +48,6 @@ from .kandinsky2_2 import ( ...@@ -47,7 +48,6 @@ from .kandinsky2_2 import (
KandinskyV22Pipeline, KandinskyV22Pipeline,
) )
from .kandinsky3 import Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline from .kandinsky3 import Kandinsky3Img2ImgPipeline, Kandinsky3Pipeline
from .kolors import KolorsImg2ImgPipeline, KolorsPipeline
from .latent_consistency_models import LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline from .latent_consistency_models import LatentConsistencyModelImg2ImgPipeline, LatentConsistencyModelPipeline
from .pag import ( from .pag import (
HunyuanDiTPAGPipeline, HunyuanDiTPAGPipeline,
...@@ -103,7 +103,6 @@ AUTO_TEXT2IMAGE_PIPELINES_MAPPING = OrderedDict( ...@@ -103,7 +103,6 @@ AUTO_TEXT2IMAGE_PIPELINES_MAPPING = OrderedDict(
("stable-diffusion-xl-controlnet-pag", StableDiffusionXLControlNetPAGPipeline), ("stable-diffusion-xl-controlnet-pag", StableDiffusionXLControlNetPAGPipeline),
("pixart-sigma-pag", PixArtSigmaPAGPipeline), ("pixart-sigma-pag", PixArtSigmaPAGPipeline),
("auraflow", AuraFlowPipeline), ("auraflow", AuraFlowPipeline),
("kolors", KolorsPipeline),
("flux", FluxPipeline), ("flux", FluxPipeline),
] ]
) )
...@@ -121,7 +120,6 @@ AUTO_IMAGE2IMAGE_PIPELINES_MAPPING = OrderedDict( ...@@ -121,7 +120,6 @@ AUTO_IMAGE2IMAGE_PIPELINES_MAPPING = OrderedDict(
("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetImg2ImgPipeline), ("stable-diffusion-xl-controlnet", StableDiffusionXLControlNetImg2ImgPipeline),
("stable-diffusion-xl-pag", StableDiffusionXLPAGImg2ImgPipeline), ("stable-diffusion-xl-pag", StableDiffusionXLPAGImg2ImgPipeline),
("lcm", LatentConsistencyModelImg2ImgPipeline), ("lcm", LatentConsistencyModelImg2ImgPipeline),
("kolors", KolorsImg2ImgPipeline),
] ]
) )
...@@ -160,6 +158,12 @@ _AUTO_INPAINT_DECODER_PIPELINES_MAPPING = OrderedDict( ...@@ -160,6 +158,12 @@ _AUTO_INPAINT_DECODER_PIPELINES_MAPPING = OrderedDict(
] ]
) )
# Kolors is only registered in the auto-pipeline mappings when sentencepiece
# is installed (its ChatGLM tokenizer requires it).
if is_sentencepiece_available():
    from .kolors import KolorsImg2ImgPipeline, KolorsPipeline

    AUTO_TEXT2IMAGE_PIPELINES_MAPPING["kolors"] = KolorsPipeline
    # BUG FIX: the img2img mapping previously registered the text2image
    # KolorsPipeline; the entry removed from AUTO_IMAGE2IMAGE_PIPELINES_MAPPING
    # was ("kolors", KolorsImg2ImgPipeline), so that class must be restored here.
    AUTO_IMAGE2IMAGE_PIPELINES_MAPPING["kolors"] = KolorsImg2ImgPipeline
SUPPORTED_TASKS_MAPPINGS = [ SUPPORTED_TASKS_MAPPINGS = [
AUTO_TEXT2IMAGE_PIPELINES_MAPPING, AUTO_TEXT2IMAGE_PIPELINES_MAPPING,
AUTO_IMAGE2IMAGE_PIPELINES_MAPPING, AUTO_IMAGE2IMAGE_PIPELINES_MAPPING,
......
...@@ -5,6 +5,7 @@ from ...utils import ( ...@@ -5,6 +5,7 @@ from ...utils import (
OptionalDependencyNotAvailable, OptionalDependencyNotAvailable,
_LazyModule, _LazyModule,
get_objects_from_module, get_objects_from_module,
is_sentencepiece_available,
is_torch_available, is_torch_available,
is_transformers_available, is_transformers_available,
) )
...@@ -14,12 +15,12 @@ _dummy_objects = {} ...@@ -14,12 +15,12 @@ _dummy_objects = {}
_import_structure = {} _import_structure = {}
try: try:
if not (is_transformers_available() and is_torch_available()): if not (is_transformers_available() and is_torch_available() and is_sentencepiece_available()):
raise OptionalDependencyNotAvailable() raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable: except OptionalDependencyNotAvailable:
from ...utils import dummy_torch_and_transformers_objects # noqa F403 from ...utils import dummy_torch_and_transformers_and_sentencepiece_objects # noqa F403
_dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects)) _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_and_sentencepiece_objects))
else: else:
_import_structure["pipeline_kolors"] = ["KolorsPipeline"] _import_structure["pipeline_kolors"] = ["KolorsPipeline"]
_import_structure["pipeline_kolors_img2img"] = ["KolorsImg2ImgPipeline"] _import_structure["pipeline_kolors_img2img"] = ["KolorsImg2ImgPipeline"]
...@@ -28,10 +29,10 @@ else: ...@@ -28,10 +29,10 @@ else:
if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT: if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
try: try:
if not (is_transformers_available() and is_torch_available()): if not (is_transformers_available() and is_torch_available() and is_sentencepiece_available()):
raise OptionalDependencyNotAvailable() raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable: except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * from ...utils.dummy_torch_and_transformers_and_sentencepiece_objects import *
else: else:
from .pipeline_kolors import KolorsPipeline from .pipeline_kolors import KolorsPipeline
......
...@@ -78,6 +78,7 @@ from .import_utils import ( ...@@ -78,6 +78,7 @@ from .import_utils import (
is_peft_version, is_peft_version,
is_safetensors_available, is_safetensors_available,
is_scipy_available, is_scipy_available,
is_sentencepiece_available,
is_tensorboard_available, is_tensorboard_available,
is_timm_available, is_timm_available,
is_torch_available, is_torch_available,
......
# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends
class KolorsImg2ImgPipeline(metaclass=DummyObject):
    # Placeholder for the real KolorsImg2ImgPipeline, used when the
    # torch/transformers/sentencepiece backends are not all installed.
    # Every entry point delegates to `requires_backends`, which reports the
    # missing backends (presumably by raising an informative error — defined
    # in ..utils).
    _backends = ["torch", "transformers", "sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "sentencepiece"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "sentencepiece"])
class KolorsPipeline(metaclass=DummyObject):
    # Placeholder for the real KolorsPipeline, used when the
    # torch/transformers/sentencepiece backends are not all installed.
    # Every entry point delegates to `requires_backends`, which reports the
    # missing backends (presumably by raising an informative error — defined
    # in ..utils).
    _backends = ["torch", "transformers", "sentencepiece"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers", "sentencepiece"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "sentencepiece"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers", "sentencepiece"])
...@@ -242,36 +242,6 @@ class AuraFlowPipeline(metaclass=DummyObject): ...@@ -242,36 +242,6 @@ class AuraFlowPipeline(metaclass=DummyObject):
requires_backends(cls, ["torch", "transformers"]) requires_backends(cls, ["torch", "transformers"])
class ChatGLMModel(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class ChatGLMTokenizer(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class CLIPImageProjection(metaclass=DummyObject): class CLIPImageProjection(metaclass=DummyObject):
_backends = ["torch", "transformers"] _backends = ["torch", "transformers"]
...@@ -767,36 +737,6 @@ class KandinskyV22PriorPipeline(metaclass=DummyObject): ...@@ -767,36 +737,6 @@ class KandinskyV22PriorPipeline(metaclass=DummyObject):
requires_backends(cls, ["torch", "transformers"]) requires_backends(cls, ["torch", "transformers"])
class KolorsImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class KolorsPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"]
def __init__(self, *args, **kwargs):
requires_backends(self, ["torch", "transformers"])
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, ["torch", "transformers"])
class LatentConsistencyModelImg2ImgPipeline(metaclass=DummyObject): class LatentConsistencyModelImg2ImgPipeline(metaclass=DummyObject):
_backends = ["torch", "transformers"] _backends = ["torch", "transformers"]
......
...@@ -294,6 +294,13 @@ try: ...@@ -294,6 +294,13 @@ try:
except importlib_metadata.PackageNotFoundError: except importlib_metadata.PackageNotFoundError:
_torchvision_available = False _torchvision_available = False
# Detect the optional sentencepiece backend: the module spec must be
# importable AND its distribution metadata must resolve, otherwise the
# package is treated as unavailable.
_sentencepiece_available = importlib.util.find_spec("sentencepiece") is not None
try:
    _sentencepiece_version = importlib_metadata.version("sentencepiece")
except importlib_metadata.PackageNotFoundError:
    _sentencepiece_available = False
else:
    logger.info(f"Successfully imported sentencepiece version {_sentencepiece_version}")
_matplotlib_available = importlib.util.find_spec("matplotlib") is not None _matplotlib_available = importlib.util.find_spec("matplotlib") is not None
try: try:
_matplotlib_version = importlib_metadata.version("matplotlib") _matplotlib_version = importlib_metadata.version("matplotlib")
...@@ -436,6 +443,10 @@ def is_google_colab(): ...@@ -436,6 +443,10 @@ def is_google_colab():
return _is_google_colab return _is_google_colab
def is_sentencepiece_available():
    # Returns the module-level flag computed at import time: True only when
    # the `sentencepiece` package was found and its metadata resolved.
    return _sentencepiece_available
# docstyle-ignore # docstyle-ignore
FLAX_IMPORT_ERROR = """ FLAX_IMPORT_ERROR = """
{0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the {0} requires the FLAX library but it was not found in your environment. Checkout the instructions on the
...@@ -553,6 +564,12 @@ SAFETENSORS_IMPORT_ERROR = """ ...@@ -553,6 +564,12 @@ SAFETENSORS_IMPORT_ERROR = """
{0} requires the safetensors library but it was not found in your environment. You can install it with pip: `pip install safetensors` {0} requires the safetensors library but it was not found in your environment. You can install it with pip: `pip install safetensors`
""" """
# docstyle-ignore
# Error template for the missing sentencepiece backend; {0} is filled with the
# name of the object being used (registered in BACKENDS_MAPPING below).
SENTENCEPIECE_IMPORT_ERROR = """
{0} requires the sentencepiece library but it was not found in your environment. You can install it with pip: `pip install sentencepiece`
"""
# docstyle-ignore # docstyle-ignore
BITSANDBYTES_IMPORT_ERROR = """ BITSANDBYTES_IMPORT_ERROR = """
{0} requires the bitsandbytes library but it was not found in your environment. You can install it with pip: `pip install bitsandbytes` {0} requires the bitsandbytes library but it was not found in your environment. You can install it with pip: `pip install bitsandbytes`
...@@ -581,6 +598,7 @@ BACKENDS_MAPPING = OrderedDict( ...@@ -581,6 +598,7 @@ BACKENDS_MAPPING = OrderedDict(
("peft", (is_peft_available, PEFT_IMPORT_ERROR)), ("peft", (is_peft_available, PEFT_IMPORT_ERROR)),
("safetensors", (is_safetensors_available, SAFETENSORS_IMPORT_ERROR)), ("safetensors", (is_safetensors_available, SAFETENSORS_IMPORT_ERROR)),
("bitsandbytes", (is_bitsandbytes_available, BITSANDBYTES_IMPORT_ERROR)), ("bitsandbytes", (is_bitsandbytes_available, BITSANDBYTES_IMPORT_ERROR)),
("sentencepiece", (is_sentencepiece_available, SENTENCEPIECE_IMPORT_ERROR)),
] ]
) )
......
...@@ -20,12 +20,11 @@ import torch ...@@ -20,12 +20,11 @@ import torch
from diffusers import ( from diffusers import (
AutoencoderKL, AutoencoderKL,
ChatGLMModel,
ChatGLMTokenizer,
EulerDiscreteScheduler, EulerDiscreteScheduler,
KolorsPipeline, KolorsPipeline,
UNet2DConditionModel, UNet2DConditionModel,
) )
from diffusers.pipelines.kolors import ChatGLMModel, ChatGLMTokenizer
from diffusers.utils.testing_utils import enable_full_determinism from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import ( from ..pipeline_params import (
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment