Unverified Commit a0c54828 authored by Dhruv Nair, committed by GitHub

Deprecate Pipelines (#6169)



* deprecate pipe

* make style

* update

* add deprecation message

* format

* remove tests for deprecated pipelines

* remove deprecation message

* make style

* fix copies

* clean up

* clean

* clean

* clean

* clean up

* clean up

* clean up toctree

* clean up

---------
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
parent 8d891e6e
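Nearly every hunk below makes the same mechanical change: each relative import gains one leading dot (`from ...utils` becomes `from ....utils`, `from ..pipeline_utils` becomes `from ...pipeline_utils`). One extra dot is exactly what Python requires when a module is nested one package level deeper, which is consistent with these pipelines being moved under a deprecated subpackage by this PR. As a rough illustration of what the deprecation itself can look like to users, here is a minimal sketch built only on the standard library `warnings` module; it is not the `deprecate` helper imported in the hunks below, and the function name, pipeline name, and wording are placeholders:

# Minimal deprecation-shim sketch using only the standard library.
# NOT the diffusers `deprecate` helper; names and messages are placeholders.
import warnings


def warn_deprecated_pipeline(name: str, removal_hint: str) -> None:
    """Point users of a deprecated pipeline at its new location before removal."""
    warnings.warn(
        f"`{name}` is deprecated and is scheduled for removal ({removal_hint}). "
        "It now lives under the deprecated pipelines subpackage.",
        FutureWarning,
        stacklevel=2,
    )


warn_deprecated_pipeline("AltDiffusionPipeline", "a future diffusers release")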
@@ -19,14 +19,14 @@ import torch
 from packaging import version
 from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, XLMRobertaTokenizer

-from ...configuration_utils import FrozenDict
-from ...image_processor import PipelineImageInput, VaeImageProcessor
-from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
-from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
-from ...models.attention_processor import FusedAttnProcessor2_0
-from ...models.lora import adjust_lora_scale_text_encoder
-from ...schedulers import KarrasDiffusionSchedulers
-from ...utils import (
+from ....configuration_utils import FrozenDict
+from ....image_processor import PipelineImageInput, VaeImageProcessor
+from ....loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel
+from ....models.attention_processor import FusedAttnProcessor2_0
+from ....models.lora import adjust_lora_scale_text_encoder
+from ....schedulers import KarrasDiffusionSchedulers
+from ....utils import (
     USE_PEFT_BACKEND,
     deprecate,
     logging,
@@ -34,9 +34,9 @@ from ...utils import (
     scale_lora_layers,
     unscale_lora_layers,
 )
-from ...utils.torch_utils import randn_tensor
-from ..pipeline_utils import DiffusionPipeline
-from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+from ....utils.torch_utils import randn_tensor
+from ...pipeline_utils import DiffusionPipeline
+from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from .modeling_roberta_series import RobertaSeriesModelWithTransformation
 from .pipeline_output import AltDiffusionPipelineOutput
@@ -119,7 +119,6 @@ def retrieve_timesteps(
     return timesteps, num_inference_steps


-# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline with Stable->Alt, CLIPTextModel->RobertaSeriesModelWithTransformation, CLIPTokenizer->XLMRobertaTokenizer, AltDiffusionSafetyChecker->StableDiffusionSafetyChecker
 class AltDiffusionPipeline(
     DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
 ):
...
@@ -21,14 +21,14 @@ import torch
 from packaging import version
 from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection, XLMRobertaTokenizer

-from ...configuration_utils import FrozenDict
-from ...image_processor import PipelineImageInput, VaeImageProcessor
-from ...loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
-from ...models import AutoencoderKL, ImageProjection, UNet2DConditionModel
-from ...models.attention_processor import FusedAttnProcessor2_0
-from ...models.lora import adjust_lora_scale_text_encoder
-from ...schedulers import KarrasDiffusionSchedulers
-from ...utils import (
+from ....configuration_utils import FrozenDict
+from ....image_processor import PipelineImageInput, VaeImageProcessor
+from ....loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
+from ....models import AutoencoderKL, ImageProjection, UNet2DConditionModel
+from ....models.attention_processor import FusedAttnProcessor2_0
+from ....models.lora import adjust_lora_scale_text_encoder
+from ....schedulers import KarrasDiffusionSchedulers
+from ....utils import (
     PIL_INTERPOLATION,
     USE_PEFT_BACKEND,
     deprecate,
@@ -37,9 +37,9 @@ from ...utils import (
     scale_lora_layers,
     unscale_lora_layers,
 )
-from ...utils.torch_utils import randn_tensor
-from ..pipeline_utils import DiffusionPipeline
-from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+from ....utils.torch_utils import randn_tensor
+from ...pipeline_utils import DiffusionPipeline
+from ...stable_diffusion.safety_checker import StableDiffusionSafetyChecker
 from .modeling_roberta_series import RobertaSeriesModelWithTransformation
 from .pipeline_output import AltDiffusionPipelineOutput
@@ -159,7 +159,6 @@ def retrieve_timesteps(
     return timesteps, num_inference_steps


-# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline with Stable->Alt, CLIPTextModel->RobertaSeriesModelWithTransformation, CLIPTokenizer->XLMRobertaTokenizer, AltDiffusionSafetyChecker->StableDiffusionSafetyChecker
 class AltDiffusionImg2ImgPipeline(
     DiffusionPipeline, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin, FromSingleFileMixin
 ):
...
@@ -4,7 +4,7 @@ from typing import List, Optional, Union
 import numpy as np
 import PIL.Image

-from ...utils import (
+from ....utils import (
     BaseOutput,
 )
...
 from typing import TYPE_CHECKING

-from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule
+from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule

 _import_structure = {
...
@@ -15,8 +15,8 @@
 import numpy as np  # noqa: E402

-from ...configuration_utils import ConfigMixin, register_to_config
-from ...schedulers.scheduling_utils import SchedulerMixin
+from ....configuration_utils import ConfigMixin, register_to_config
+from ....schedulers.scheduling_utils import SchedulerMixin

 try:
...
@@ -20,10 +20,10 @@ import numpy as np
 import torch
 from PIL import Image

-from ...models import AutoencoderKL, UNet2DConditionModel
-from ...schedulers import DDIMScheduler, DDPMScheduler
-from ...utils.torch_utils import randn_tensor
-from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
+from ....models import AutoencoderKL, UNet2DConditionModel
+from ....schedulers import DDIMScheduler, DDPMScheduler
+from ....utils.torch_utils import randn_tensor
+from ...pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
 from .mel import Mel
...
 from typing import TYPE_CHECKING

-from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule
+from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule

 _import_structure = {"pipeline_latent_diffusion_uncond": ["LDMPipeline"]}
...
@@ -17,10 +17,10 @@ from typing import List, Optional, Tuple, Union
 import torch

-from ...models import UNet2DModel, VQModel
-from ...schedulers import DDIMScheduler
-from ...utils.torch_utils import randn_tensor
-from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
+from ....models import UNet2DModel, VQModel
+from ....schedulers import DDIMScheduler
+from ....utils.torch_utils import randn_tensor
+from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput


 class LDMPipeline(DiffusionPipeline):
...
 from typing import TYPE_CHECKING

-from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule
+from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule

 _import_structure = {"pipeline_pndm": ["PNDMPipeline"]}
...
@@ -17,10 +17,10 @@ from typing import List, Optional, Tuple, Union
 import torch

-from ...models import UNet2DModel
-from ...schedulers import PNDMScheduler
-from ...utils.torch_utils import randn_tensor
-from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
+from ....models import UNet2DModel
+from ....schedulers import PNDMScheduler
+from ....utils.torch_utils import randn_tensor
+from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput


 class PNDMPipeline(DiffusionPipeline):
...
 from typing import TYPE_CHECKING

-from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule
+from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule

 _import_structure = {"pipeline_repaint": ["RePaintPipeline"]}
...
@@ -19,11 +19,11 @@ import numpy as np
 import PIL.Image
 import torch

-from ...models import UNet2DModel
-from ...schedulers import RePaintScheduler
-from ...utils import PIL_INTERPOLATION, deprecate, logging
-from ...utils.torch_utils import randn_tensor
-from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
+from ....models import UNet2DModel
+from ....schedulers import RePaintScheduler
+from ....utils import PIL_INTERPOLATION, deprecate, logging
+from ....utils.torch_utils import randn_tensor
+from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput


 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
...
 from typing import TYPE_CHECKING

-from ...utils import DIFFUSERS_SLOW_IMPORT, _LazyModule
+from ....utils import DIFFUSERS_SLOW_IMPORT, _LazyModule

 _import_structure = {"pipeline_score_sde_ve": ["ScoreSdeVePipeline"]}
...
@@ -16,10 +16,10 @@ from typing import List, Optional, Tuple, Union
 import torch

-from ...models import UNet2DModel
-from ...schedulers import ScoreSdeVeScheduler
-from ...utils.torch_utils import randn_tensor
-from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
+from ....models import UNet2DModel
+from ....schedulers import ScoreSdeVeScheduler
+from ....utils.torch_utils import randn_tensor
+from ...pipeline_utils import DiffusionPipeline, ImagePipelineOutput


 class ScoreSdeVePipeline(DiffusionPipeline):
...
 # flake8: noqa
 from typing import TYPE_CHECKING
-from ...utils import DIFFUSERS_SLOW_IMPORT
-from ...utils import (
+from ....utils import (
+    DIFFUSERS_SLOW_IMPORT,
     _LazyModule,
     is_note_seq_available,
     OptionalDependencyNotAvailable,
@@ -17,7 +17,7 @@ try:
     if not (is_transformers_available() and is_torch_available()):
         raise OptionalDependencyNotAvailable()
 except OptionalDependencyNotAvailable:
-    from ...utils import dummy_torch_and_transformers_objects  # noqa F403
+    from ....utils import dummy_torch_and_transformers_objects  # noqa F403

     _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
 else:
@@ -32,7 +32,7 @@ try:
     if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
         raise OptionalDependencyNotAvailable()
 except OptionalDependencyNotAvailable:
-    from ...utils import dummy_transformers_and_torch_and_note_seq_objects
+    from ....utils import dummy_transformers_and_torch_and_note_seq_objects

     _dummy_objects.update(get_objects_from_module(dummy_transformers_and_torch_and_note_seq_objects))
 else:
@@ -45,7 +45,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
             raise OptionalDependencyNotAvailable()
     except OptionalDependencyNotAvailable:
-        from ...utils.dummy_torch_and_transformers_objects import *
+        from ....utils.dummy_torch_and_transformers_objects import *
     else:
         from .pipeline_spectrogram_diffusion import SpectrogramDiffusionPipeline
         from .pipeline_spectrogram_diffusion import SpectrogramContEncoder
@@ -56,7 +56,7 @@ if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
         if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
             raise OptionalDependencyNotAvailable()
     except OptionalDependencyNotAvailable:
-        from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *
+        from ....utils.dummy_transformers_and_torch_and_note_seq_objects import *
     else:
         from .midi_utils import MidiProcessor
...
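The guarded imports in the __init__ above follow the optional-dependency pattern used throughout the pipelines package: importing the module never fails outright, dummy placeholder objects are registered when transformers, torch, or note_seq are missing, and the real classes are only imported once the backends are available. A simplified, self-contained sketch of that idea (not the actual diffusers helpers; the install hint is illustrative):

# Simplified stand-in for the optional-dependency guard pattern shown above.
class OptionalDependencyNotAvailable(ImportError):
    """Raised when an optional backend required by a pipeline is not installed."""


def ensure_note_seq_available() -> None:
    """Raise a helpful error if the optional `note_seq` backend is missing."""
    try:
        import note_seq  # noqa: F401
    except ImportError:
        raise OptionalDependencyNotAvailable(
            "This pipeline needs the optional `note_seq` package; "
            "install it with `pip install note-seq`."
        )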
@@ -22,8 +22,8 @@ from transformers.models.t5.modeling_t5 import (
     T5LayerNorm,
 )

-from ...configuration_utils import ConfigMixin, register_to_config
-from ...models import ModelMixin
+from ....configuration_utils import ConfigMixin, register_to_config
+from ....models import ModelMixin


 class SpectrogramContEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
...
@@ -22,7 +22,7 @@ import numpy as np
 import torch
 import torch.nn.functional as F

-from ...utils import is_note_seq_available
+from ....utils import is_note_seq_available
 from .pipeline_spectrogram_diffusion import TARGET_FEATURE_LENGTH
...
@@ -18,8 +18,8 @@ import torch.nn as nn
 from transformers.modeling_utils import ModuleUtilsMixin
 from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm

-from ...configuration_utils import ConfigMixin, register_to_config
-from ...models import ModelMixin
+from ....configuration_utils import ConfigMixin, register_to_config
+from ....models import ModelMixin


 class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
...
@@ -19,16 +19,16 @@ from typing import Any, Callable, List, Optional, Tuple, Union
 import numpy as np
 import torch

-from ...models import T5FilmDecoder
-from ...schedulers import DDPMScheduler
-from ...utils import is_onnx_available, logging
-from ...utils.torch_utils import randn_tensor
+from ....models import T5FilmDecoder
+from ....schedulers import DDPMScheduler
+from ....utils import is_onnx_available, logging
+from ....utils.torch_utils import randn_tensor


 if is_onnx_available():
-    from ..onnx_utils import OnnxRuntimeModel
+    from ...onnx_utils import OnnxRuntimeModel

-from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
+from ...pipeline_utils import AudioPipelineOutput, DiffusionPipeline
 from .continuous_encoder import SpectrogramContEncoder
 from .notes_encoder import SpectrogramNotesEncoder
...
from typing import TYPE_CHECKING

from ....utils import (
    DIFFUSERS_SLOW_IMPORT,
    OptionalDependencyNotAvailable,
    _LazyModule,
    get_objects_from_module,
    is_torch_available,
    is_transformers_available,
)


_dummy_objects = {}
_import_structure = {}

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ....utils import dummy_torch_and_transformers_objects

    _dummy_objects.update(get_objects_from_module(dummy_torch_and_transformers_objects))
else:
    _import_structure["pipeline_cycle_diffusion"] = ["CycleDiffusionPipeline"]
    _import_structure["pipeline_stable_diffusion_inpaint_legacy"] = ["StableDiffusionInpaintPipelineLegacy"]
    _import_structure["pipeline_stable_diffusion_model_editing"] = ["StableDiffusionModelEditingPipeline"]
    _import_structure["pipeline_stable_diffusion_paradigms"] = ["StableDiffusionParadigmsPipeline"]
    _import_structure["pipeline_stable_diffusion_pix2pix_zero"] = ["StableDiffusionPix2PixZeroPipeline"]

if TYPE_CHECKING or DIFFUSERS_SLOW_IMPORT:
    try:
        if not (is_transformers_available() and is_torch_available()):
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        from ....utils.dummy_torch_and_transformers_objects import *
    else:
        from .pipeline_cycle_diffusion import CycleDiffusionPipeline
        from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
        from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
        from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
        from .pipeline_stable_diffusion_pix2pix_zero import StableDiffusionPix2PixZeroPipeline
else:
    import sys

    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )

    for name, value in _dummy_objects.items():
        setattr(sys.modules[__name__], name, value)
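The new __init__ above plugs the deprecated Stable Diffusion variants into diffusers' lazy-import machinery: `_import_structure` maps each submodule to the names it exports, and at import time `sys.modules[__name__]` is swapped for a `_LazyModule` so the heavy pipeline modules are only loaded when one of those names is first accessed. Below is a self-contained sketch of that mechanism; it is a simplified stand-in, not diffusers' actual `_LazyModule` implementation:

# Simplified lazy-module sketch; not the real diffusers `_LazyModule`.
import importlib
import types
from typing import Dict, List


class LazyModule(types.ModuleType):
    """Defer importing submodules until one of their exported names is accessed."""

    def __init__(self, name: str, import_structure: Dict[str, List[str]]):
        super().__init__(name)
        # Map each exported symbol to the submodule that defines it.
        self._symbol_to_submodule = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, item: str):
        if item not in self._symbol_to_submodule:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_submodule[item]}")
        value = getattr(submodule, item)
        setattr(self, item, value)  # cache so later lookups bypass __getattr__
        return value

With a mapping like the one in the __init__ above, an attribute lookup such as `CycleDiffusionPipeline` resolves to `pipeline_cycle_diffusion` and imports that module on first use.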