"vscode:/vscode.git/clone" did not exist on "2f5fcf4f1cb2b3a500efb961126e341016d93f1d"
Unverified Commit b6e0b016 authored by Dhruv Nair's avatar Dhruv Nair Committed by GitHub
Browse files

Lazy Import for Diffusers (#4829)



* initial commit

* move modules to import struct

* add dummy objects and _LazyModule

* add lazy import to schedulers

* clean up unused imports

* lazy import on models module

* lazy import for schedulers module

* add lazy import to pipelines module

* lazy import altdiffusion

* lazy import audio diffusion

* lazy import audioldm

* lazy import consistency model

* lazy import controlnet

* lazy import dance diffusion ddim ddpm

* lazy import deepfloyd

* lazy import kandinsky

* lazy imports

* lazy import semantic diffusion

* lazy imports

* lazy import stable diffusion

* move sd output to its own module

* clean up

* lazy import t2iadapter

* lazy import unclip

* lazy import versatile and vq diffusion

* lazy import vq diffusion

* helper to fetch objects from modules

* lazy import sdxl

* lazy import txt2vid

* lazy import stochastic karras

* fix model imports

* fix bug

* lazy import

* clean up

* clean up

* fixes for tests

* fixes for tests

* clean up

* remove import of torch_utils from utils module

* clean up

* clean up

* fix mistake import statement

* dedicated modules for exporting and loading

* remove testing utils from utils module

* fixes from merge conflicts

* Update src/diffusers/pipelines/kandinsky2_2/__init__.py

* fix docs

* fix alt diffusion copied from

* fix check dummies

* fix more docs

* remove accelerate import from utils module

* add type checking

* make style

* fix check dummies

* remove torch import from xformers check

* clean up error message

* fixes after upstream merges

* dummy objects fix

* fix tests

* remove unused module import

---------
Co-authored-by: default avatarPatrick von Platen <patrick.v.platen@gmail.com>
parent 88735249
import os
from typing import Union
import PIL.Image
import PIL.ImageOps
import requests
def load_image(image: Union[str, PIL.Image.Image]) -> PIL.Image.Image:
"""
Loads `image` to a PIL Image.
Args:
image (`str` or `PIL.Image.Image`):
The image to convert to the PIL Image format.
Returns:
`PIL.Image.Image`:
A PIL Image.
"""
if isinstance(image, str):
if image.startswith("http://") or image.startswith("https://"):
image = PIL.Image.open(requests.get(image, stream=True).raw)
elif os.path.isfile(image):
image = PIL.Image.open(image)
else:
raise ValueError(
f"Incorrect path or url, URLs must start with `http://` or `https://`, and {image} is not a valid path"
)
elif isinstance(image, PIL.Image.Image):
image = image
else:
raise ValueError(
"Incorrect format used for image. Should be an url linking to an image, a local path, or a PIL image."
)
image = PIL.ImageOps.exif_transpose(image)
image = image.convert("RGB")
return image
...@@ -25,7 +25,7 @@ from diffusers.models.embeddings import get_timestep_embedding ...@@ -25,7 +25,7 @@ from diffusers.models.embeddings import get_timestep_embedding
from diffusers.models.lora import LoRACompatibleLinear from diffusers.models.lora import LoRACompatibleLinear
from diffusers.models.resnet import Downsample2D, ResnetBlock2D, Upsample2D from diffusers.models.resnet import Downsample2D, ResnetBlock2D, Upsample2D
from diffusers.models.transformer_2d import Transformer2DModel from diffusers.models.transformer_2d import Transformer2DModel
from diffusers.utils import torch_device from diffusers.utils.testing_utils import torch_device
class EmbeddingsTests(unittest.TestCase): class EmbeddingsTests(unittest.TestCase):
......
...@@ -43,8 +43,7 @@ from diffusers.models.attention_processor import ( ...@@ -43,8 +43,7 @@ from diffusers.models.attention_processor import (
LoRAAttnProcessor2_0, LoRAAttnProcessor2_0,
XFormersAttnProcessor, XFormersAttnProcessor,
) )
from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import floats_tensor, require_torch_gpu, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu, slow
def create_unet_lora_layers(unet: nn.Module): def create_unet_lora_layers(unet: nn.Module):
......
...@@ -30,12 +30,13 @@ from requests.exceptions import HTTPError ...@@ -30,12 +30,13 @@ from requests.exceptions import HTTPError
from diffusers.models import UNet2DConditionModel from diffusers.models import UNet2DConditionModel
from diffusers.models.attention_processor import AttnProcessor, AttnProcessor2_0, XFormersAttnProcessor from diffusers.models.attention_processor import AttnProcessor, AttnProcessor2_0, XFormersAttnProcessor
from diffusers.training_utils import EMAModel from diffusers.training_utils import EMAModel
from diffusers.utils import logging, torch_device from diffusers.utils import logging
from diffusers.utils.testing_utils import ( from diffusers.utils.testing_utils import (
CaptureLogger, CaptureLogger,
require_torch_2, require_torch_2,
require_torch_gpu, require_torch_gpu,
run_test_in_subprocess, run_test_in_subprocess,
torch_device,
) )
from ..others.test_utils import TOKEN, USER, is_staging_test from ..others.test_utils import TOKEN, USER, is_staging_test
......
...@@ -21,8 +21,7 @@ import torch ...@@ -21,8 +21,7 @@ import torch
from parameterized import parameterized from parameterized import parameterized
from diffusers import PriorTransformer from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin from .test_modeling_common import ModelTesterMixin
......
...@@ -18,7 +18,7 @@ import unittest ...@@ -18,7 +18,7 @@ import unittest
import torch import torch
from diffusers import UNet1DModel from diffusers import UNet1DModel
from diffusers.utils import floats_tensor, slow, torch_device from diffusers.utils.testing_utils import floats_tensor, slow, torch_device
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
......
...@@ -20,8 +20,14 @@ import unittest ...@@ -20,8 +20,14 @@ import unittest
import torch import torch
from diffusers import UNet2DModel from diffusers import UNet2DModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device from diffusers.utils import logging
from diffusers.utils.testing_utils import enable_full_determinism from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
slow,
torch_all_close,
torch_device,
)
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
......
...@@ -25,17 +25,17 @@ from pytest import mark ...@@ -25,17 +25,17 @@ from pytest import mark
from diffusers import UNet2DConditionModel from diffusers import UNet2DConditionModel
from diffusers.models.attention_processor import CustomDiffusionAttnProcessor, LoRAAttnProcessor from diffusers.models.attention_processor import CustomDiffusionAttnProcessor, LoRAAttnProcessor
from diffusers.utils import ( from diffusers.utils import logging
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor, floats_tensor,
load_hf_numpy, load_hf_numpy,
logging,
require_torch_gpu, require_torch_gpu,
slow, slow,
torch_all_close, torch_all_close,
torch_device, torch_device,
) )
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
......
...@@ -22,14 +22,9 @@ import torch ...@@ -22,14 +22,9 @@ import torch
from diffusers.models import ModelMixin, UNet3DConditionModel from diffusers.models import ModelMixin, UNet3DConditionModel
from diffusers.models.attention_processor import AttnProcessor, LoRAAttnProcessor from diffusers.models.attention_processor import AttnProcessor, LoRAAttnProcessor
from diffusers.utils import ( from diffusers.utils import logging
floats_tensor,
logging,
skip_mps,
torch_device,
)
from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, skip_mps, torch_device
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
......
...@@ -20,9 +20,16 @@ import torch ...@@ -20,9 +20,16 @@ import torch
from parameterized import parameterized from parameterized import parameterized
from diffusers import AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderTiny from diffusers import AsymmetricAutoencoderKL, AutoencoderKL, AutoencoderTiny
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_hf_numpy,
require_torch_gpu,
slow,
torch_all_close,
torch_device,
)
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
......
...@@ -18,8 +18,7 @@ import unittest ...@@ -18,8 +18,7 @@ import unittest
import torch import torch
from diffusers import VQModel from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
import unittest import unittest
from diffusers.models.unet_2d_blocks import * # noqa F403 from diffusers.models.unet_2d_blocks import * # noqa F403
from diffusers.utils import torch_device from diffusers.utils.testing_utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin from .test_unet_blocks_common import UNetBlockTesterMixin
......
...@@ -17,8 +17,8 @@ from typing import Tuple ...@@ -17,8 +17,8 @@ from typing import Tuple
import torch import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device from diffusers.utils.testing_utils import floats_tensor, require_torch, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch from diffusers.utils.torch_utils import randn_tensor
@require_torch @require_torch
......
...@@ -25,8 +25,7 @@ from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( ...@@ -25,8 +25,7 @@ from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig, RobertaSeriesConfig,
RobertaSeriesModelWithTransformation, RobertaSeriesModelWithTransformation,
) )
from diffusers.utils import nightly, torch_device from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
......
...@@ -32,8 +32,15 @@ from diffusers.pipelines.alt_diffusion.modeling_roberta_series import ( ...@@ -32,8 +32,15 @@ from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig, RobertaSeriesConfig,
RobertaSeriesModelWithTransformation, RobertaSeriesModelWithTransformation,
) )
from diffusers.utils import floats_tensor, load_image, load_numpy, nightly, torch_device from diffusers.utils import load_image
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_numpy,
nightly,
require_torch_gpu,
torch_device,
)
enable_full_determinism() enable_full_determinism()
......
...@@ -29,8 +29,7 @@ from diffusers import ( ...@@ -29,8 +29,7 @@ from diffusers import (
UNet2DConditionModel, UNet2DConditionModel,
UNet2DModel, UNet2DModel,
) )
from diffusers.utils import nightly, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism() enable_full_determinism()
......
...@@ -36,8 +36,8 @@ from diffusers import ( ...@@ -36,8 +36,8 @@ from diffusers import (
PNDMScheduler, PNDMScheduler,
UNet2DConditionModel, UNet2DConditionModel,
) )
from diffusers.utils import is_xformers_available, nightly, slow, torch_device from diffusers.utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism from diffusers.utils.testing_utils import enable_full_determinism, nightly, slow, torch_device
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin from ..test_pipelines_common import PipelineTesterMixin
......
...@@ -44,8 +44,8 @@ from diffusers import ( ...@@ -44,8 +44,8 @@ from diffusers import (
LMSDiscreteScheduler, LMSDiscreteScheduler,
PNDMScheduler, PNDMScheduler,
) )
from diffusers.utils import is_accelerate_available, is_accelerate_version, is_xformers_available, slow, torch_device from diffusers.utils import is_accelerate_available, is_accelerate_version, is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin from ..test_pipelines_common import PipelineTesterMixin
......
...@@ -10,8 +10,14 @@ from diffusers import ( ...@@ -10,8 +10,14 @@ from diffusers import (
ConsistencyModelPipeline, ConsistencyModelPipeline,
UNet2DModel, UNet2DModel,
) )
from diffusers.utils import nightly, randn_tensor, torch_device from diffusers.utils.testing_utils import (
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_2, require_torch_gpu enable_full_determinism,
nightly,
require_torch_2,
require_torch_gpu,
torch_device,
)
from diffusers.utils.torch_utils import randn_tensor
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin from ..test_pipelines_common import PipelineTesterMixin
......
...@@ -31,14 +31,18 @@ from diffusers import ( ...@@ -31,14 +31,18 @@ from diffusers import (
UNet2DConditionModel, UNet2DConditionModel,
) )
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import ( from diffusers.utils.testing_utils import (
enable_full_determinism, enable_full_determinism,
load_image,
load_numpy,
require_torch_2, require_torch_2,
require_torch_gpu, require_torch_gpu,
run_test_in_subprocess, run_test_in_subprocess,
slow,
torch_device,
) )
from diffusers.utils.torch_utils import randn_tensor
from ..pipeline_params import ( from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS, IMAGE_TO_IMAGE_IMAGE_PARAMS,
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment