Unverified commit 29b2c93c authored by Patrick von Platen, committed by GitHub

Make repo structure consistent (#1862)



* move files a bit

* more refactors

* fix more

* more fixes

* fix more onnx

* make style

* upload

* fix

* up

* fix more

* up again

* up

* small fix

* Update src/diffusers/__init__.py
Co-authored-by: Pedro Cuenca <pedro@huggingface.co>

* correct
Co-authored-by: Pedro Cuenca <pedro@huggingface.co>
parent ab0e92fd
# Copyright 2022 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os


# Resolve the shared Hugging Face cache directory: HF_HOME wins if set,
# then XDG_CACHE_HOME/huggingface, falling back to ~/.cache/huggingface.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
default_cache_path = os.path.join(hf_cache_home, "diffusers")


# Canonical file names and endpoints used throughout the library.
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "diffusion_pytorch_model.bin"
FLAX_WEIGHTS_NAME = "diffusion_flax_model.msgpack"
ONNX_WEIGHTS_NAME = "model.onnx"
SAFETENSORS_WEIGHTS_NAME = "diffusion_pytorch_model.safetensors"
ONNX_EXTERNAL_WEIGHTS_NAME = "weights.pb"
HUGGINGFACE_CO_RESOLVE_ENDPOINT = "https://huggingface.co"
DIFFUSERS_CACHE = default_cache_path
DIFFUSERS_DYNAMIC_MODULE_NAME = "diffusers_modules"
HF_MODULES_CACHE = os.getenv("HF_MODULES_CACHE", os.path.join(hf_cache_home, "modules"))
_COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS = [
"DDIMScheduler",
"DDPMScheduler",
"PNDMScheduler",
"LMSDiscreteScheduler",
"EulerDiscreteScheduler",
"HeunDiscreteScheduler",
"EulerAncestralDiscreteScheduler",
"DPMSolverMultistepScheduler",
"DPMSolverSinglestepScheduler",
]
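
The list above drives scheduler interchangeability for Stable Diffusion. As a hedged illustration of standard diffusers usage (not part of this commit; the model ID is only an example), any scheduler named in _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS can be rebuilt from the pipeline's current scheduler config and swapped in:

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

# Load a Stable Diffusion pipeline with its default scheduler.
pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")

# Any scheduler in the compatibility list can be instantiated from the
# existing scheduler's config and dropped in place.
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
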
# This file is autogenerated by the command `make fix-copies`, do not edit.
# flake8: noqa
from ..utils import DummyObject, requires_backends


class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
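
For readers unfamiliar with the dummy-object pattern: these placeholders let `import diffusers` succeed even when an optional backend such as onnxruntime is missing, deferring the failure to the moment the class is actually used. A simplified sketch of the mechanism (illustrative only, not the exact diffusers implementation):

import importlib.util


def requires_backends(obj, backends):
    # Raise a readable ImportError naming any missing backend module.
    name = obj.__name__ if hasattr(obj, "__name__") else obj.__class__.__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the missing backend(s): {', '.join(missing)}")


class DummyObject(type):
    # Metaclass: attribute access on the placeholder class triggers the
    # backend check instead of an AttributeError.
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)
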
@@ -4,7 +4,7 @@
 from ..utils import DummyObject, requires_backends


-class ModelMixin(metaclass=DummyObject):
+class AutoencoderKL(metaclass=DummyObject):
     _backends = ["torch"]

     def __init__(self, *args, **kwargs):
@@ -19,7 +19,7 @@ class ModelMixin(metaclass=DummyObject):
         requires_backends(cls, ["torch"])


-class AutoencoderKL(metaclass=DummyObject):
+class ModelMixin(metaclass=DummyObject):
     _backends = ["torch"]

     def __init__(self, *args, **kwargs):
@@ -152,7 +152,7 @@ def get_scheduler(*args, **kwargs):
     requires_backends(get_scheduler, ["torch"])


-class DiffusionPipeline(metaclass=DummyObject):
+class AudioPipelineOutput(metaclass=DummyObject):
     _backends = ["torch"]

     def __init__(self, *args, **kwargs):
@@ -212,6 +212,36 @@ class DDPMPipeline(metaclass=DummyObject):
         requires_backends(cls, ["torch"])


+class DiffusionPipeline(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
+class ImagePipelineOutput(metaclass=DummyObject):
+    _backends = ["torch"]
+
+    def __init__(self, *args, **kwargs):
+        requires_backends(self, ["torch"])
+
+    @classmethod
+    def from_config(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+    @classmethod
+    def from_pretrained(cls, *args, **kwargs):
+        requires_backends(cls, ["torch"])
+
+
 class KarrasVePipeline(metaclass=DummyObject):
     _backends = ["torch"]
...
@@ -28,8 +28,8 @@ from urllib import request
 from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info

-from . import __version__
-from .utils import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
+from .. import __version__
+from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging


 COMMUNITY_PIPELINES_URL = (
@@ -172,7 +172,7 @@ def find_pipeline_class(loaded_module):
     Retrieve pipeline class that inherits from `DiffusionPipeline`. Note that there has to be exactly one class
     inheriting from `DiffusionPipeline`.
     """
-    from .pipeline_utils import DiffusionPipeline
+    from ..pipelines import DiffusionPipeline

     cls_members = dict(inspect.getmembers(loaded_module, inspect.isclass))
...
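
The import fixes above touch the dynamic-module loader that backs community pipelines. As a hedged usage sketch of that mechanism (the repo and pipeline IDs are illustrative examples, not taken from this commit):

from diffusers import DiffusionPipeline

# `custom_pipeline` makes the loader fetch pipeline code dynamically,
# resolving community pipelines via COMMUNITY_PIPELINES_URL and caching
# the fetched modules under HF_MODULES_CACHE.
pipe = DiffusionPipeline.from_pretrained(
    "google/ddpm-cifar10-32",
    custom_pipeline="one_step_unet",
)
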
@@ -22,9 +22,10 @@ from uuid import uuid4
 from huggingface_hub import HfFolder, whoami

-from . import __version__
-from .utils import ENV_VARS_TRUE_VALUES, HUGGINGFACE_CO_RESOLVE_ENDPOINT, logging
-from .utils.import_utils import (
+from .. import __version__
+from .constants import HUGGINGFACE_CO_RESOLVE_ENDPOINT
+from .import_utils import (
+    ENV_VARS_TRUE_VALUES,
     _flax_version,
     _jax_version,
     _onnxruntime_version,
@@ -34,13 +35,14 @@ from .utils.import_utils import (
     is_onnx_available,
     is_torch_available,
 )
+from .logging import get_logger


 if is_modelcards_available():
     from modelcards import CardData, ModelCard


-logger = logging.get_logger(__name__)
+logger = get_logger(__name__)

 MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "utils" / "model_card_template.md"
...
@@ -18,7 +18,7 @@ from typing import Optional, Tuple, Union
 import torch

-from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
+from diffusers import DiffusionPipeline, ImagePipelineOutput


 class CustomLocalPipeline(DiffusionPipeline):
@@ -63,10 +63,10 @@ class CustomLocalPipeline(DiffusionPipeline):
                 The output format of the generated image. Choose between
                 [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
             return_dict (`bool`, *optional*, defaults to `True`):
-                Whether or not to return a [`~pipeline_utils.ImagePipelineOutput`] instead of a plain tuple.
+                Whether or not to return a [`~pipelines.ImagePipelineOutput`] instead of a plain tuple.

         Returns:
-            [`~pipeline_utils.ImagePipelineOutput`] or `tuple`: [`~pipelines.utils.ImagePipelineOutput`] if
+            [`~pipelines.ImagePipelineOutput`] or `tuple`: [`~pipelines.ImagePipelineOutput`] if
             `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
             generated images.
         """
...
@@ -19,7 +19,7 @@ import unittest
 import torch

 from diffusers import AutoencoderKL
-from diffusers.modeling_utils import ModelMixin
+from diffusers.models import ModelMixin
 from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
 from parameterized import parameterized
...
@@ -20,8 +20,13 @@ import unittest
 import numpy as np
 import torch

-from diffusers import UnCLIPImageVariationPipeline, UnCLIPScheduler, UNet2DConditionModel, UNet2DModel
-from diffusers.pipeline_utils import DiffusionPipeline
+from diffusers import (
+    DiffusionPipeline,
+    UnCLIPImageVariationPipeline,
+    UnCLIPScheduler,
+    UNet2DConditionModel,
+    UNet2DModel,
+)
 from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
 from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
 from diffusers.utils.testing_utils import load_image, require_torch_gpu
...
@@ -21,7 +21,7 @@ from typing import Dict, List, Tuple
 import numpy as np
 import torch

-from diffusers.modeling_utils import ModelMixin
+from diffusers.models import ModelMixin
 from diffusers.training_utils import EMAModel
 from diffusers.utils import torch_device
...
@@ -33,6 +33,7 @@ from diffusers import (
     DDIMScheduler,
     DDPMPipeline,
     DDPMScheduler,
+    DiffusionPipeline,
     DPMSolverMultistepScheduler,
     EulerAncestralDiscreteScheduler,
     EulerDiscreteScheduler,
@@ -45,7 +46,6 @@ from diffusers import (
     UNet2DModel,
     logging,
 )
-from diffusers.pipeline_utils import DiffusionPipeline
 from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
 from diffusers.utils import CONFIG_NAME, WEIGHTS_NAME, floats_tensor, nightly, slow, torch_device
 from diffusers.utils.testing_utils import CaptureLogger, get_tests_dir, require_torch_gpu
@@ -704,7 +704,7 @@ class PipelineSlowTests(unittest.TestCase):
     def test_warning_unused_kwargs(self):
         model_id = "hf-internal-testing/unet-pipeline-dummy"
-        logger = logging.get_logger("diffusers.pipeline_utils")
+        logger = logging.get_logger("diffusers.pipelines")
         with tempfile.TemporaryDirectory() as tmpdirname:
             with CaptureLogger(logger) as cap_logger:
                 DiffusionPipeline.from_pretrained(
...
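
Taken together, the hunks above show the downstream migration this commit implies: imports from the private `pipeline_utils` and `modeling_utils` modules move to the consolidated public packages. A minimal before/after, using only import paths that appear in the diffs:

# Before this commit:
# from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
# from diffusers.modeling_utils import ModelMixin

# After this commit:
from diffusers import DiffusionPipeline, ImagePipelineOutput
from diffusers.models import ModelMixin
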