Unverified Commit b6e0b016 authored by Dhruv Nair, committed by GitHub

Lazy Import for Diffusers (#4829)



* initial commit

* move modules to import struct

* add dummy objects and _LazyModule

* add lazy import to schedulers

* clean up unused imports

* lazy import on models module

* lazy import for schedulers module

* add lazy import to pipelines module

* lazy import altdiffusion

* lazy import audio diffusion

* lazy import audioldm

* lazy import consistency model

* lazy import controlnet

* lazy import dance diffusion ddim ddpm

* lazy import deepfloyd

* lazy import kandinsky

* lazy imports

* lazy import semantic diffusion

* lazy imports

* lazy import stable diffusion

* move sd output to its own module

* clean up

* lazy import t2iadapter

* lazy import unclip

* lazy import versatile and vq diffusion

* lazy import vq diffusion

* helper to fetch objects from modules

* lazy import sdxl

* lazy import txt2vid

* lazy import stochastic karras

* fix model imports

* fix bug

* lazy import

* clean up

* clean up

* fixes for tests

* fixes for tests

* clean up

* remove import of torch_utils from utils module

* clean up

* clean up

* fix mistaken import statement

* dedicated modules for exporting and loading

* remove testing utils from utils module

* fixes from merge conflicts

* Update src/diffusers/pipelines/kandinsky2_2/__init__.py

* fix docs

* fix alt diffusion copied from

* fix check dummies

* fix more docs

* remove accelerate import from utils module

* add type checking

* make style

* fix check dummies

* remove torch import from xformers check

* clean up error message

* fixes after upstream merges

* dummy objects fix

* fix tests

* remove unused module import

---------
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
parent 88735249
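The diff below is only the test-suite side of this change: helpers such as `floats_tensor`, `load_image`, `slow`, and `torch_device` are no longer re-exported from `diffusers.utils` and must be imported from `diffusers.utils.testing_utils` instead. The library-side mechanism that motivates it is the `_LazyModule` pattern from the commit message above: each package `__init__.py` declares an `_import_structure` map and defers the actual submodule imports until an attribute is first accessed, while a `TYPE_CHECKING` branch keeps static analyzers and IDEs working. The sketch below illustrates that pattern; the `_LazyModule` helper here is a simplified stand-in for the one in `diffusers.utils`, and the entries in `_import_structure` are placeholders, not the real contents of the diffusers `__init__.py`.

```python
# __init__.py sketch of the lazy-import pattern described in this PR.
# The _LazyModule below is a simplified stand-in; module/class names are illustrative.
import importlib
import sys
from types import ModuleType
from typing import TYPE_CHECKING, Any


class _LazyModule(ModuleType):
    """Module proxy that imports a submodule only when one of its names is accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure
        # Map each exported name back to the submodule that defines it.
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, item: str) -> Any:
        if item in self._import_structure:  # e.g. `pkg.schedulers`
            return importlib.import_module(f"{self.__name__}.{item}")
        if item in self._name_to_module:  # e.g. `pkg.DDIMScheduler`
            module = importlib.import_module(f"{self.__name__}.{self._name_to_module[item]}")
            return getattr(module, item)
        raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")


_import_structure = {
    "models": ["AutoencoderKL", "UNet2DConditionModel"],
    "schedulers": ["DDIMScheduler", "DDPMScheduler"],
}

if TYPE_CHECKING:
    # Eager imports only for type checkers and IDEs; never executed at runtime.
    from .models import AutoencoderKL, UNet2DConditionModel  # noqa: F401
    from .schedulers import DDIMScheduler, DDPMScheduler  # noqa: F401
else:
    # Swap this module for the lazy proxy, so `from pkg import DDIMScheduler`
    # only imports `pkg.schedulers` at the point of first use.
    sys.modules[__name__] = _LazyModule(__name__, _import_structure)
```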
@@ -32,13 +32,18 @@ from diffusers import (
     StableDiffusionImg2ImgPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
+    floats_tensor,
+    load_image,
+    load_numpy,
+    nightly,
     require_torch_2,
     require_torch_gpu,
     run_test_in_subprocess,
     skip_mps,
+    slow,
+    torch_device,
 )
 from ..pipeline_params import (
...
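Every hunk in this diff makes the same mechanical change: test-only helpers that used to be re-exported from `diffusers.utils` (e.g. `floats_tensor`, `load_image`, `load_numpy`, `nightly`, `slow`, `torch_device`) now live only in `diffusers.utils.testing_utils`, so importing the top-level `utils` module no longer pulls in test dependencies. Downstream test code that relied on the old re-exports needs the same one-line adjustment, shown here for a hypothetical test module:

```python
# Before: test helpers re-exported from the top-level utils module.
# from diffusers.utils import floats_tensor, slow, torch_device

# After this PR: import them from the dedicated testing module instead.
from diffusers.utils.testing_utils import floats_tensor, slow, torch_device
```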
@@ -36,12 +36,17 @@ from diffusers import (
 )
 from diffusers.models.attention_processor import AttnProcessor
 from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint import prepare_mask_and_masked_image
-from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, slow, torch_device
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
+    floats_tensor,
+    load_image,
+    load_numpy,
+    nightly,
     require_torch_2,
     require_torch_gpu,
     run_test_in_subprocess,
+    slow,
+    torch_device,
 )
 from ...models.test_models_unet_2d_condition import create_lora_layers
...
@@ -33,8 +33,17 @@ from diffusers import (
     UNet2DModel,
     VQModel,
 )
-from diffusers.utils import floats_tensor, load_image, nightly, slow, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, preprocess_image, require_torch_gpu
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    floats_tensor,
+    load_image,
+    load_numpy,
+    nightly,
+    preprocess_image,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)
 enable_full_determinism()
...
@@ -32,8 +32,14 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.image_processor import VaeImageProcessor
-from diffusers.utils import floats_tensor, load_image, slow, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    floats_tensor,
+    load_image,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
...
@@ -20,8 +20,7 @@ import numpy as np
 import torch
 from diffusers import StableDiffusionKDiffusionPipeline
-from diffusers.utils import slow, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
 enable_full_determinism()
...
@@ -28,8 +28,7 @@ from diffusers import (
     StableDiffusionLDM3DPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import nightly, slow, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, slow, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
...
@@ -28,8 +28,7 @@ from diffusers import (
     StableDiffusionModelEditingPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import slow, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps, slow, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
...
@@ -29,8 +29,7 @@ from diffusers import (
     StableDiffusionPanoramaPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import nightly, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
+from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, skip_mps, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
...
@@ -27,10 +27,11 @@ from diffusers import (
     StableDiffusionParadigmsPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import slow, torch_device
 from diffusers.utils.testing_utils import (
     enable_full_determinism,
     require_torch_gpu,
+    slow,
+    torch_device,
 )
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
...
@@ -33,8 +33,17 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.image_processor import VaeImageProcessor
-from diffusers.utils import floats_tensor, load_numpy, nightly, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, load_image, load_pt, require_torch_gpu, skip_mps
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    floats_tensor,
+    load_image,
+    load_numpy,
+    load_pt,
+    nightly,
+    require_torch_gpu,
+    skip_mps,
+    torch_device,
+)
 from ..pipeline_params import (
     TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
...
@@ -26,8 +26,7 @@ from diffusers import (
     StableDiffusionSAGPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import slow, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
+from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow, torch_device
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
...
@@ -32,12 +32,15 @@ from diffusers import (
     UNet2DConditionModel,
     logging,
 )
-from diffusers.utils import load_numpy, nightly, slow, torch_device
 from diffusers.utils.testing_utils import (
     CaptureLogger,
     enable_full_determinism,
+    load_numpy,
+    nightly,
     numpy_cosine_similarity_distance,
     require_torch_gpu,
+    slow,
+    torch_device,
 )
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
...
@@ -26,8 +26,13 @@ from diffusers import (
     StableDiffusionAttendAndExcitePipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import load_numpy, skip_mps, slow
-from diffusers.utils.testing_utils import numpy_cosine_similarity_distance, require_torch_gpu
+from diffusers.utils.testing_utils import (
+    load_numpy,
+    numpy_cosine_similarity_distance,
+    require_torch_gpu,
+    skip_mps,
+    slow,
+)
 from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
 from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
...
@@ -39,17 +39,18 @@ from diffusers import (
     StableDiffusionDepth2ImgPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import (
+from diffusers.utils import is_accelerate_available, is_accelerate_version
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
     floats_tensor,
-    is_accelerate_available,
-    is_accelerate_version,
     load_image,
     load_numpy,
     nightly,
+    require_torch_gpu,
+    skip_mps,
     slow,
     torch_device,
 )
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
 from ..pipeline_params import (
     IMAGE_TO_IMAGE_IMAGE_PARAMS,
...
@@ -32,8 +32,15 @@ from diffusers import (
     StableDiffusionDiffEditPipeline,
     UNet2DConditionModel,
 )
-from diffusers.utils import load_image, nightly, slow
-from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    floats_tensor,
+    load_image,
+    nightly,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)
 from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
 from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
...
@@ -17,8 +17,8 @@ import gc
 import unittest
 from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
-from diffusers.utils import is_flax_available, nightly, slow
-from diffusers.utils.testing_utils import require_flax
+from diffusers.utils import is_flax_available
+from diffusers.utils.testing_utils import nightly, require_flax, slow
 if is_flax_available():
...
@@ -17,8 +17,8 @@ import gc
 import unittest
 from diffusers import FlaxStableDiffusionInpaintPipeline
-from diffusers.utils import is_flax_available, load_image, slow
-from diffusers.utils.testing_utils import require_flax
+from diffusers.utils import is_flax_available, load_image
+from diffusers.utils.testing_utils import require_flax, slow
 if is_flax_available():
...
@@ -23,8 +23,15 @@ from PIL import Image
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
-from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    floats_tensor,
+    load_image,
+    load_numpy,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)
 from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
 from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
...
@@ -30,8 +30,15 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.schedulers import KarrasDiffusionSchedulers
-from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    floats_tensor,
+    load_image,
+    load_numpy,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)
 from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
 from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
...
@@ -24,8 +24,15 @@ from PIL import Image
 from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
 from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
-from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
-from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
+from diffusers.utils.testing_utils import (
+    enable_full_determinism,
+    floats_tensor,
+    load_image,
+    load_numpy,
+    require_torch_gpu,
+    slow,
+    torch_device,
+)
 enable_full_determinism()
...
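Besides the lazy module proxy, the commit message above mentions adding dummy objects and fixing `check_dummies`. When an optional backend is not installed, the lazy `__init__.py` exports an auto-generated placeholder instead of the real class, so `import diffusers` still succeeds and the error only surfaces when the object is actually used. A rough sketch of that placeholder pattern follows; the class name, backend list, and error text are illustrative, and `requires_backends` is simplified compared to the real helper:

```python
# Sketch of the dummy-object pattern referenced by "add dummy objects" /
# "fix check dummies" above. The real dummy modules are auto-generated and
# kept in sync by CI; names and messages here are illustrative.
class DummyObject(type):
    """Metaclass that turns any attribute access on the class into a backend error."""

    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)


def requires_backends(obj, backends):
    # Simplified: always raise, naming the missing backends.
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise ImportError(
        f"{name} requires the following backend(s), which could not be imported: {', '.join(backends)}"
    )


# Stand-in exported when, for example, torch/transformers are missing.
class StableDiffusionPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, cls._backends)
```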