Unverified Commit b6e0b016 authored by Dhruv Nair, committed by GitHub

Lazy Import for Diffusers (#4829)



* initial commit

* move modules to import struct

* add dummy objects and _LazyModule

* add lazy import to schedulers

* clean up unused imports

* lazy import on models module

* lazy import for schedulers module

* add lazy import to pipelines module

* lazy import altdiffusion

* lazy import audio diffusion

* lazy import audioldm

* lazy import consistency model

* lazy import controlnet

* lazy import dance diffusion ddim ddpm

* lazy import deepfloyd

* lazy import kandinsky

* lazy imports

* lazy import semantic diffusion

* lazy imports

* lazy import stable diffusion

* move sd output to its own module

* clean up

* lazy import t2iadapter

* lazy import unclip

* lazy import versatile and vq diffusion

* lazy import vq diffusion

* helper to fetch objects from modules

* lazy import sdxl

* lazy import txt2vid

* lazy import stochastic karras

* fix model imports

* fix bug

* lazy import

* clean up

* clean up

* fixes for tests

* fixes for tests

* clean up

* remove import of torch_utils from utils module

* clean up

* clean up

* fix mistaken import statement

* dedicated modules for exporting and loading

* remove testing utils from utils module

* fixes from merge conflicts

* Update src/diffusers/pipelines/kandinsky2_2/__init__.py

* fix docs

* fix alt diffusion copied from

* fix check dummies

* fix more docs

* remove accelerate import from utils module

* add type checking

* make style

* fix check dummies

* remove torch import from xformers check

* clean up error message

* fixes after upstream merges

* dummy objects fix

* fix tests

* remove unused module import

---------
Co-authored-by: Patrick von Platen <patrick.v.platen@gmail.com>
parent 88735249
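
The mechanism behind the bullet points above is the `_LazyModule` pattern 🤗 Transformers already uses: each `__init__.py` declares an `_import_structure` dict mapping submodules to the public names they expose, keeps the real imports under `TYPE_CHECKING` so IDEs and type checkers still resolve them, and swaps itself for a lazy proxy at runtime. A minimal sketch of such an `__init__.py` (the `my_pipeline`/`MyPipeline` names are hypothetical, not the actual Diffusers layout):

```python
import sys
from typing import TYPE_CHECKING

from diffusers.utils import _LazyModule

# Maps submodule name -> public names it exposes; nothing is imported eagerly.
_import_structure = {"my_pipeline": ["MyPipeline"]}  # hypothetical submodule

if TYPE_CHECKING:
    # Static analysis still sees the real symbols.
    from .my_pipeline import MyPipeline
else:
    # Accessing e.g. `package.MyPipeline` triggers the submodule import on demand.
    sys.modules[__name__] = _LazyModule(
        __name__,
        globals()["__file__"],
        _import_structure,
        module_spec=__spec__,
    )
```

When an optional backend is missing, the import structure points at the `utils.dummy_*_objects` modules instead, whose `DummyObject` classes raise a clear "backend not installed" error only when a class is first used; that is what the repeated "fix check dummies" entries refer to.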
@@ -2,29 +2,25 @@
 Utility and helper functions for working with 🤗 Diffusers.

-## randn_tensor
-
-[[autodoc]] diffusers.utils.randn_tensor
-
 ## numpy_to_pil

-[[autodoc]] utils.pil_utils.numpy_to_pil
+[[autodoc]] utils.numpy_to_pil

 ## pt_to_pil

-[[autodoc]] utils.pil_utils.pt_to_pil
+[[autodoc]] utils.pt_to_pil

 ## load_image

-[[autodoc]] utils.testing_utils.load_image
+[[autodoc]] utils.load_image

 ## export_to_gif

-[[autodoc]] utils.testing_utils.export_to_gif
+[[autodoc]] utils.export_to_gif

 ## export_to_video

-[[autodoc]] utils.testing_utils.export_to_video
+[[autodoc]] utils.export_to_video

 ## make_image_grid
......
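
After the reshuffle above, these helpers are documented (and importable) directly under `diffusers.utils` rather than via `pil_utils`/`testing_utils`. A short usage sketch (the URL is a placeholder):

```python
import torch

from diffusers.utils import load_image, pt_to_pil

image = load_image("https://example.com/input.png")  # placeholder URL; local paths work too
pil_images = pt_to_pil(torch.rand(1, 3, 64, 64) * 2 - 1)  # expects image tensors in [-1, 1]
```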
@@ -113,7 +113,7 @@ print(np.abs(image).sum())
 The result is not the same even though you're using an identical seed because the GPU uses a different random number generator than the CPU.

-To circumvent this problem, 🧨 Diffusers has a [`~diffusers.utils.randn_tensor`] function for creating random noise on the CPU, and then moving the tensor to a GPU if necessary. The `randn_tensor` function is used everywhere inside the pipeline, allowing the user to **always** pass a CPU `Generator` even if the pipeline is run on a GPU.
+To circumvent this problem, 🧨 Diffusers has a [`~diffusers.utils.torch_utils.randn_tensor`] function for creating random noise on the CPU, and then moving the tensor to a GPU if necessary. The `randn_tensor` function is used everywhere inside the pipeline, allowing the user to **always** pass a CPU `Generator` even if the pipeline is run on a GPU.

 You'll see the results are much closer now!
......
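
The helper behaves the same from its new home; a minimal sketch of drawing seeded noise on the CPU for a GPU run (the latent shape is arbitrary, for illustration):

```python
import torch

from diffusers.utils.torch_utils import randn_tensor  # moved from diffusers.utils

generator = torch.Generator(device="cpu").manual_seed(0)
# The noise is sampled on the CPU with the seeded generator, then moved to the
# GPU, so the values match across CPU-only and GPU runs.
latents = randn_tensor((1, 4, 64, 64), generator=generator, device=torch.device("cuda"))
```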
@@ -19,10 +19,8 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.utils import (
-    PIL_INTERPOLATION,
-    randn_tensor,
-)
+from diffusers.utils import PIL_INTERPOLATION
+from diffusers.utils.torch_utils import randn_tensor


 def preprocess(image, w, h):
......
@@ -19,11 +19,8 @@ from diffusers import (
     UNet2DConditionModel,
 )
 from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.utils import (
-    PIL_INTERPOLATION,
-    deprecate,
-    randn_tensor,
-)
+from diffusers.utils import PIL_INTERPOLATION, deprecate
+from diffusers.utils.torch_utils import randn_tensor

 EXAMPLE_DOC_STRING = """
......
@@ -20,7 +20,7 @@ from torchvision import transforms

 from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
 from diffusers.schedulers import DDIMScheduler
-from diffusers.utils import randn_tensor
+from diffusers.utils.torch_utils import randn_tensor

 trans = transforms.Compose(
......
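
The same one-line migration repeats through every pipeline below: downstream code that imported the torch-specific helpers from `diffusers.utils` needs the equivalent change, e.g.:

```python
# Before this PR:
#   from diffusers.utils import is_compiled_module, randn_tensor
# After it, the torch-specific helpers live in a dedicated submodule:
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
```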
@@ -21,8 +21,8 @@ from diffusers.utils import (
     is_accelerate_available,
     is_accelerate_version,
     logging,
-    randn_tensor,
 )
+from diffusers.utils.torch_utils import randn_tensor

 # ------------------------------------------------------------------------------
......
@@ -30,9 +30,9 @@ from diffusers.utils import (
     is_accelerate_version,
     is_invisible_watermark_available,
     logging,
-    randn_tensor,
     replace_example_docstring,
 )
+from diffusers.utils.torch_utils import randn_tensor

 if is_invisible_watermark_available():
......
@@ -14,6 +14,7 @@
 from typing import List, Optional, Union

 import torch
+from diffusers.utils.torch_utils import randn_tensor
 from packaging import version
 from PIL import Image
 from transformers import CLIPTextModel, CLIPTokenizer
@@ -30,7 +31,6 @@ from diffusers.schedulers import EulerAncestralDiscreteScheduler, KarrasDiffusionSchedulers
 from diffusers.utils import (
     deprecate,
     logging,
-    randn_tensor,
     replace_example_docstring,
 )
......
@@ -35,9 +35,9 @@ from diffusers.utils import (
     is_accelerate_available,
     is_accelerate_version,
     logging,
-    randn_tensor,
     replace_example_docstring,
 )
+from diffusers.utils.torch_utils import randn_tensor

 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
......
@@ -8,6 +8,7 @@ from typing import Any, Callable, Dict, List, Optional, Union
 import numpy as np
 import PIL.Image
 import torch
+from diffusers.utils.torch_utils import randn_tensor
 from PIL import Image
 from transformers import CLIPTokenizer
@@ -19,7 +20,6 @@ from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.utils import (
     deprecate,
     logging,
-    randn_tensor,
     replace_example_docstring,
 )
......
@@ -11,6 +11,7 @@ import PIL.Image
 import pycuda.driver as cuda
 import tensorrt as trt
 import torch
+from diffusers.utils.torch_utils import randn_tensor
 from PIL import Image
 from pycuda.tools import make_default_context
 from transformers import CLIPTokenizer
@@ -23,7 +24,6 @@ from diffusers.schedulers import KarrasDiffusionSchedulers
 from diffusers.utils import (
     deprecate,
     logging,
-    randn_tensor,
     replace_example_docstring,
 )
......
@@ -16,9 +16,9 @@ from diffusers.utils import (
     PIL_INTERPOLATION,
     is_accelerate_available,
     is_accelerate_version,
-    randn_tensor,
     replace_example_docstring,
 )
+from diffusers.utils.torch_utils import randn_tensor

 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
......
@@ -17,9 +17,9 @@ from diffusers.utils import (
     PIL_INTERPOLATION,
     is_accelerate_available,
     is_accelerate_version,
-    randn_tensor,
     replace_example_docstring,
 )
+from diffusers.utils.torch_utils import randn_tensor

 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
......
@@ -16,9 +16,9 @@ from diffusers.utils import (
     PIL_INTERPOLATION,
     is_accelerate_available,
     is_accelerate_version,
-    randn_tensor,
     replace_example_docstring,
 )
+from diffusers.utils.torch_utils import randn_tensor

 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
......
@@ -11,7 +11,8 @@ from diffusers.models.attention import BasicTransformerBlock
 from diffusers.models.unet_2d_blocks import CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, UpBlock2D
 from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
-from diffusers.utils import is_compiled_module, logging, randn_tensor
+from diffusers.utils import logging
+from diffusers.utils.torch_utils import is_compiled_module, randn_tensor

 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
......
@@ -31,9 +31,9 @@ from diffusers.utils import (
     is_accelerate_available,
     is_accelerate_version,
     logging,
-    randn_tensor,
     replace_example_docstring,
 )
+from diffusers.utils.torch_utils import randn_tensor

 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
......
@@ -10,7 +10,8 @@ from diffusers.models.attention import BasicTransformerBlock
 from diffusers.models.unet_2d_blocks import CrossAttnDownBlock2D, CrossAttnUpBlock2D, DownBlock2D, UpBlock2D
 from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
 from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import rescale_noise_cfg
-from diffusers.utils import PIL_INTERPOLATION, logging, randn_tensor
+from diffusers.utils import PIL_INTERPOLATION, logging
+from diffusers.utils.torch_utils import randn_tensor

 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
......
@@ -33,8 +33,8 @@ from diffusers.utils import (
     is_accelerate_available,
     is_accelerate_version,
     logging,
-    randn_tensor,
 )
+from diffusers.utils.torch_utils import randn_tensor

 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
......
@@ -15,7 +15,8 @@ from diffusers.models.unet_2d_blocks import (
     UpBlock2D,
 )
 from diffusers.pipelines.stable_diffusion_xl import StableDiffusionXLPipelineOutput
-from diffusers.utils import PIL_INTERPOLATION, logging, randn_tensor
+from diffusers.utils import PIL_INTERPOLATION, logging
+from diffusers.utils.torch_utils import randn_tensor

 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
......
@@ -8,7 +8,8 @@ from transformers.models.clip.modeling_clip import CLIPTextModelOutput
 from diffusers.models import PriorTransformer
 from diffusers.pipelines import DiffusionPipeline, StableDiffusionImageVariationPipeline
 from diffusers.schedulers import UnCLIPScheduler
-from diffusers.utils import logging, randn_tensor
+from diffusers.utils import logging
+from diffusers.utils.torch_utils import randn_tensor

 logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
......