Unverified Commit 5e181edd authored by Sayak Paul, committed by GitHub

Deprecate slicing and tiling methods from `DiffusionPipeline` (#12271)

* deprecate slicing from flux pipeline.

* propagate.

* tiling

* up

* up
parent 55f0b3d7
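In practice, the change is the same everywhere in this commit: the pipeline-level `enable_vae_slicing()` / `enable_vae_tiling()` helpers (and their `disable_*` counterparts) now emit a deprecation warning scheduled for removal in 0.40.0, and callers are pointed at the equivalent methods on the VAE itself. A minimal before/after sketch of the migration (the Flux checkpoint id below is only illustrative):

```python
import torch
from diffusers import FluxPipeline

pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev",  # illustrative checkpoint; any Flux checkpoint behaves the same way
    torch_dtype=torch.bfloat16,
)

# Deprecated by this commit: still works, but warns and is slated for removal in 0.40.0.
pipe.enable_vae_tiling()

# Recommended replacement: call the equivalent methods on the VAE directly.
pipe.vae.enable_tiling()
pipe.vae.enable_slicing()
```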
@@ -1705,6 +1705,12 @@ class FaithDiffStableDiffusionXLPipeline(
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
deprecate(
"enable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.enable_tiling()
self.unet.denoise_encoder.enable_tiling()
@@ -1713,6 +1719,12 @@ class FaithDiffStableDiffusionXLPipeline(
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
deprecate(
"disable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.disable_tiling()
self.unet.denoise_encoder.disable_tiling()
...
@@ -35,6 +35,7 @@ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from diffusers.utils import (
USE_PEFT_BACKEND,
deprecate,
is_torch_xla_available,
logging,
replace_example_docstring,
@@ -643,6 +644,12 @@ class FluxKontextPipeline(
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
deprecate(
"enable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.enable_tiling()
# Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_tiling
@@ -651,6 +658,12 @@ class FluxKontextPipeline(
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
deprecate(
"disable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.disable_tiling()
def preprocess_image(self, image: PipelineImageInput, _auto_resize: bool, multiple_of: int) -> torch.Tensor:
...
@@ -30,6 +30,7 @@ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from diffusers.utils import (
USE_PEFT_BACKEND,
deprecate,
is_torch_xla_available,
logging,
replace_example_docstring,
@@ -526,6 +527,12 @@ class RFInversionFluxPipeline(
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`."
deprecate(
"enable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.enable_slicing()
def disable_vae_slicing(self):
@@ -533,6 +540,12 @@ class RFInversionFluxPipeline(
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`."
deprecate(
"disable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.disable_slicing()
def enable_vae_tiling(self):
@@ -541,6 +554,12 @@ class RFInversionFluxPipeline(
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
deprecate(
"enable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.enable_tiling()
def disable_vae_tiling(self):
@@ -548,6 +567,12 @@ class RFInversionFluxPipeline(
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
deprecate(
"disable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.disable_tiling()
def prepare_latents_inversion(
...
@@ -35,6 +35,7 @@ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from diffusers.utils import (
USE_PEFT_BACKEND,
deprecate,
is_torch_xla_available,
logging,
replace_example_docstring,
@@ -702,6 +703,12 @@ class FluxSemanticGuidancePipeline(
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
deprecate(
"enable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.enable_tiling()
# Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.disable_vae_tiling
@@ -710,6 +717,12 @@ class FluxSemanticGuidancePipeline(
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
deprecate(
"disable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.disable_tiling()
# Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.prepare_latents
...
@@ -28,6 +28,7 @@ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from diffusers.utils import (
USE_PEFT_BACKEND,
deprecate,
is_torch_xla_available,
logging,
replace_example_docstring,
@@ -503,6 +504,12 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`."
deprecate(
"enable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.enable_slicing()
def disable_vae_slicing(self):
@@ -510,6 +517,12 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`."
deprecate(
"disable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.disable_slicing()
def enable_vae_tiling(self):
@@ -518,6 +531,12 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
deprecate(
"enable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.enable_tiling()
def disable_vae_tiling(self):
@@ -525,6 +544,12 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
deprecate(
"disable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.disable_tiling()
def prepare_latents(
...
@@ -29,11 +29,7 @@ from diffusers.models.transformers import SD3Transformer2DModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion_3.pipeline_output import StableDiffusion3PipelineOutput
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from diffusers.utils import (
is_torch_xla_available,
logging,
replace_example_docstring,
)
from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring
from diffusers.utils.torch_utils import randn_tensor
...
@@ -504,6 +504,12 @@ class StableDiffusionBoxDiffPipeline(
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`."
deprecate(
"enable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.enable_slicing()
def disable_vae_slicing(self):
@@ -511,6 +517,12 @@ class StableDiffusionBoxDiffPipeline(
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`."
deprecate(
"disable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.disable_slicing()
def enable_vae_tiling(self):
@@ -519,6 +531,12 @@ class StableDiffusionBoxDiffPipeline(
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
deprecate(
"enable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.enable_tiling()
def disable_vae_tiling(self):
@@ -526,6 +544,12 @@ class StableDiffusionBoxDiffPipeline(
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
deprecate(
"disable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.disable_tiling()
def _encode_prompt(
...
@@ -471,6 +471,12 @@ class StableDiffusionPAGPipeline(
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`."
deprecate(
"enable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.enable_slicing()
def disable_vae_slicing(self):
@@ -478,6 +484,12 @@ class StableDiffusionPAGPipeline(
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`."
deprecate(
"disable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.disable_slicing()
def enable_vae_tiling(self):
@@ -486,6 +498,12 @@ class StableDiffusionPAGPipeline(
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
deprecate(
"enable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.enable_tiling()
def disable_vae_tiling(self):
@@ -493,6 +511,12 @@ class StableDiffusionPAGPipeline(
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
deprecate(
"disable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.disable_tiling()
def _encode_prompt(
...
@@ -26,7 +26,7 @@ from diffusers.models import AutoencoderKLHunyuanVideo, HunyuanVideoTransformer3
from diffusers.pipelines.hunyuan_video.pipeline_output import HunyuanVideoPipelineOutput
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring
from diffusers.utils import deprecate, is_torch_xla_available, logging, replace_example_docstring
from diffusers.utils.torch_utils import randn_tensor
from diffusers.video_processor import VideoProcessor
@@ -481,6 +481,12 @@ class HunyuanVideoSTGPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin):
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`."
deprecate(
"enable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.enable_slicing()
def disable_vae_slicing(self):
@@ -488,6 +494,12 @@ class HunyuanVideoSTGPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin):
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`."
deprecate(
"disable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.disable_slicing()
def enable_vae_tiling(self):
@@ -496,6 +508,12 @@ class HunyuanVideoSTGPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin):
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
deprecate(
"enable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.enable_tiling()
def disable_vae_tiling(self):
@@ -503,6 +521,12 @@ class HunyuanVideoSTGPipeline(DiffusionPipeline, HunyuanVideoLoraLoaderMixin):
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
deprecate(
"disable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.disable_tiling()
@property
...
@@ -26,11 +26,7 @@ from diffusers.models import AutoencoderKLMochi, MochiTransformer3DModel
from diffusers.pipelines.mochi.pipeline_output import MochiPipelineOutput
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
from diffusers.utils import (
is_torch_xla_available,
logging,
replace_example_docstring,
)
from diffusers.utils import deprecate, is_torch_xla_available, logging, replace_example_docstring
from diffusers.utils.torch_utils import randn_tensor
from diffusers.video_processor import VideoProcessor
@@ -458,6 +454,12 @@ class MochiSTGPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin):
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`."
deprecate(
"enable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.enable_slicing()
def disable_vae_slicing(self):
@@ -465,6 +467,12 @@ class MochiSTGPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin):
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`."
deprecate(
"disable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.disable_slicing()
def enable_vae_tiling(self):
@@ -473,6 +481,12 @@ class MochiSTGPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin):
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
deprecate(
"enable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.enable_tiling()
def disable_vae_tiling(self):
@@ -480,6 +494,12 @@ class MochiSTGPipeline(DiffusionPipeline, Mochi1LoraLoaderMixin):
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
deprecate(
"disable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.disable_tiling()
def prepare_latents(
...
@@ -263,6 +263,12 @@ class PromptDiffusionPipeline(
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
deprecate(
"enable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.enable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling
@@ -271,6 +277,12 @@ class PromptDiffusionPipeline(
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
deprecate(
"disable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.disable_tiling()
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
...
@@ -651,6 +651,12 @@ class AllegroPipeline(DiffusionPipeline):
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`."
deprecate(
"enable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.enable_slicing()
def disable_vae_slicing(self):
@@ -658,6 +664,12 @@ class AllegroPipeline(DiffusionPipeline):
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`."
deprecate(
"disable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.disable_slicing()
def enable_vae_tiling(self):
@@ -666,6 +678,12 @@ class AllegroPipeline(DiffusionPipeline):
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
deprecate(
"enable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.enable_tiling()
def disable_vae_tiling(self):
@@ -673,6 +691,12 @@ class AllegroPipeline(DiffusionPipeline):
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
deprecate(
"disable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.disable_tiling()
@property
...
@@ -34,6 +34,7 @@ from transformers import (
from ...models import AutoencoderKL
from ...schedulers import KarrasDiffusionSchedulers
from ...utils import (
deprecate,
is_accelerate_available,
is_accelerate_version,
is_librosa_available,
@@ -228,6 +229,12 @@ class AudioLDM2Pipeline(DiffusionPipeline):
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`."
deprecate(
"enable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.enable_slicing()
# Copied from diffusers.pipelines.pipeline_utils.StableDiffusionMixin.disable_vae_slicing
@@ -236,6 +243,12 @@ class AudioLDM2Pipeline(DiffusionPipeline):
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`."
deprecate(
"disable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.disable_slicing()
def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = "cuda"):
...
@@ -19,11 +19,7 @@ from transformers import CLIPTokenizer
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import PNDMScheduler
from ...utils import (
is_torch_xla_available,
logging,
replace_example_docstring,
)
from ...utils import is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DeprecatedPipelineMixin, DiffusionPipeline, ImagePipelineOutput
from .blip_image_processing import BlipImageProcessor
...
@@ -25,6 +25,7 @@ from ...models import AutoencoderKL, ChromaTransformer2DModel
from ...schedulers import FlowMatchEulerDiscreteScheduler
from ...utils import (
USE_PEFT_BACKEND,
deprecate,
is_torch_xla_available,
logging,
replace_example_docstring,
@@ -508,6 +509,12 @@ class ChromaPipeline(
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`."
deprecate(
"enable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.enable_slicing()
def disable_vae_slicing(self):
@@ -515,6 +522,12 @@ class ChromaPipeline(
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`."
deprecate(
"disable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.disable_slicing()
def enable_vae_tiling(self):
@@ -523,6 +536,12 @@ class ChromaPipeline(
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
deprecate(
"enable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.enable_tiling()
def disable_vae_tiling(self):
@@ -530,6 +549,12 @@ class ChromaPipeline(
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
deprecate(
"disable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.disable_tiling()
# Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.prepare_latents
...
@@ -25,6 +25,7 @@ from ...models import AutoencoderKL, ChromaTransformer2DModel
from ...schedulers import FlowMatchEulerDiscreteScheduler
from ...utils import (
USE_PEFT_BACKEND,
deprecate,
is_torch_xla_available,
logging,
replace_example_docstring,
@@ -542,6 +543,12 @@ class ChromaImg2ImgPipeline(
Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
"""
depr_message = f"Calling `enable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_slicing()`."
deprecate(
"enable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.enable_slicing()
def disable_vae_slicing(self):
@@ -549,6 +556,12 @@ class ChromaImg2ImgPipeline(
Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_slicing()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_slicing()`."
deprecate(
"disable_vae_slicing",
"0.40.0",
depr_message,
)
self.vae.disable_slicing()
def enable_vae_tiling(self):
@@ -557,6 +570,12 @@ class ChromaImg2ImgPipeline(
compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
processing larger images.
"""
depr_message = f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
deprecate(
"enable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.enable_tiling()
def disable_vae_tiling(self):
@@ -564,6 +583,12 @@ class ChromaImg2ImgPipeline(
Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
computing decoding in one step.
"""
depr_message = f"Calling `disable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method will be removed in a future version. Please use `pipe.vae.disable_tiling()`."
deprecate(
"disable_vae_tiling",
"0.40.0",
depr_message,
)
self.vae.disable_tiling()
# Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3_img2img.StableDiffusion3Img2ImgPipeline.get_timesteps
...
@@ -28,11 +28,7 @@ from ...models import AutoencoderKLCogVideoX, CogVideoXTransformer3DModel
from ...models.embeddings import get_3d_rotary_pos_embed
from ...pipelines.pipeline_utils import DiffusionPipeline
from ...schedulers import CogVideoXDDIMScheduler, CogVideoXDPMScheduler
from ...utils import (
is_torch_xla_available,
logging,
replace_example_docstring,
)
from ...utils import is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from ...video_processor import VideoProcessor
from .pipeline_output import CogVideoXPipelineOutput
...
@@ -18,11 +18,7 @@ import torch
from ...models import UNet2DModel
from ...schedulers import CMStochasticIterativeScheduler
from ...utils import (
is_torch_xla_available,
logging,
replace_example_docstring,
)
from ...utils import is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
...
@@ -20,11 +20,7 @@ from transformers import CLIPTokenizer
from ...models import AutoencoderKL, ControlNetModel, UNet2DConditionModel
from ...schedulers import PNDMScheduler
from ...utils import (
is_torch_xla_available,
logging,
replace_example_docstring,
)
from ...utils import is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from ..blip_diffusion.blip_image_processing import BlipImageProcessor
from ..blip_diffusion.modeling_blip2 import Blip2QFormerModel
...
@@ -27,11 +27,7 @@ from ...models import AutoencoderKL, HunyuanDiT2DControlNetModel, HunyuanDiT2DMo
from ...models.embeddings import get_2d_rotary_pos_embed
from ...pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from ...schedulers import DDPMScheduler
from ...utils import (
is_torch_xla_available,
logging,
replace_example_docstring,
)
from ...utils import is_torch_xla_available, logging, replace_example_docstring
from ...utils.torch_utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline
...
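Every hunk above applies the same wrapper pattern: build a message naming the drop-in replacement, register the deprecation with the method name and the 0.40.0 removal version, then forward the call to the VAE. A condensed sketch of that pattern, with a stand-in class name for the pipelines touched above:

```python
from diffusers.utils import deprecate


class AnyPipeline:  # stand-in for the community and core pipelines changed in this commit
    def enable_vae_tiling(self):
        depr_message = (
            f"Calling `enable_vae_tiling()` on a `{self.__class__.__name__}` is deprecated and this method "
            "will be removed in a future version. Please use `pipe.vae.enable_tiling()`."
        )
        # Emits a deprecation warning tagged with the removal version; the method keeps working until then.
        deprecate("enable_vae_tiling", "0.40.0", depr_message)
        self.vae.enable_tiling()
```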