Unverified commit 0eb507f2, authored by Suraj Patil, committed by GitHub

StableDiffusionImageVariationPipeline (#1365)



* add StableDiffusionImageVariationPipeline

* add init

* use CLIPVisionModelWithProjection

* fix _encode_image

* add copied from

* fix copies

* add doc

* handle tensor in _encode_image

* add tests

* correct model_id

* remove copied from in enable_sequential_cpu_offload

* fix tests

* make slow tests pass

* update slow tests

* use temp model for now

* fix test_stable_diffusion_img_variation_intermediate_state

* fix test_stable_diffusion_img_variation_intermediate_state

* check for torch.Tensor

* quality

* fix name

* fix slow tests

* install transformers from source

* fix install

* fix install

* Apply suggestions from code review
Co-authored-by: Pedro Cuenca <pedro@huggingface.co>

* input_image -> image

* remove deprecation warnings

* fix test_stable_diffusion_img_variation_multiple_images

* make flake happy
Co-authored-by: default avatarPedro Cuenca <pedro@huggingface.co>
parent 9e234d80
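
For reviewers who want to try the branch, here is a minimal usage sketch of the new pipeline. It assumes the temporary "fusing/sd-image-variations-diffusers" checkpoint that the slow tests below load (per the "use temp model for now" commit, the final model id may change) and a CUDA device; the sample image URL is the one used by the integration tests.

import torch
from diffusers import StableDiffusionImageVariationPipeline
from diffusers.utils import load_image

# Load the image-variation weights; fp16 keeps VRAM usage modest.
pipe = StableDiffusionImageVariationPipeline.from_pretrained(
    "fusing/sd-image-variations-diffusers",
    torch_dtype=torch.float16,
)
pipe.to("cuda")

# The pipeline is conditioned on an image rather than a text prompt
# (the argument was renamed from input_image to image during review).
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/vermeer.jpg"
).resize((512, 512))

generator = torch.Generator(device="cuda").manual_seed(0)
out = pipe(init_image, guidance_scale=7.5, generator=generator)
out.images[0].save("variation.png")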
@@ -60,6 +60,7 @@ jobs:
      run: |
        python -m pip install -e .[quality,test]
        python -m pip install git+https://github.com/huggingface/accelerate
        python -m pip install -U git+https://github.com/huggingface/transformers
    - name: Environment
      run: |
@@ -127,6 +128,7 @@ jobs:
        ${CONDA_RUN} python -m pip install -e .[quality,test]
        ${CONDA_RUN} python -m pip install --pre torch==${MPS_TORCH_VERSION} --extra-index-url https://download.pytorch.org/whl/test/cpu
        ${CONDA_RUN} python -m pip install git+https://github.com/huggingface/accelerate
        ${CONDA_RUN} python -m pip install -U git+https://github.com/huggingface/transformers
    - name: Environment
      shell: arch -arch arm64 bash {0}
...
@@ -62,6 +62,7 @@ jobs:
      run: |
        python -m pip install -e .[quality,test]
        python -m pip install git+https://github.com/huggingface/accelerate
        python -m pip install -U git+https://github.com/huggingface/transformers
    - name: Environment
      run: |
@@ -131,6 +132,7 @@ jobs:
      run: |
        python -m pip install -e .[quality,test,training]
        python -m pip install git+https://github.com/huggingface/accelerate
        python -m pip install -U git+https://github.com/huggingface/transformers
    - name: Environment
      run: |
...
@@ -88,3 +88,10 @@ If you want to use all possible use cases in a single `DiffusionPipeline` you ca...
    - __call__
    - enable_attention_slicing
    - disable_attention_slicing

## StableDiffusionImageVariationPipeline
[[autodoc]] StableDiffusionImageVariationPipeline
    - __call__
    - enable_attention_slicing
    - disable_attention_slicing
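
The new docs entry exposes the attention-slicing toggles alongside __call__. As a quick sketch of what they do (reusing pipe and init_image from the example above): enable_attention_slicing computes attention in slices to cut peak VRAM at a small speed cost, and disable_attention_slicing restores the default.

# Trade a little speed for lower peak-memory attention computation.
pipe.enable_attention_slicing()  # enable_attention_slicing(1) gives the largest savings
image = pipe(init_image).images[0]
pipe.disable_attention_slicing()  # back to unsliced attention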
@@ -69,6 +69,7 @@ if is_torch_available() and is_transformers_available():
        AltDiffusionPipeline,
        CycleDiffusionPipeline,
        LDMTextToImagePipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
...
@@ -19,6 +19,7 @@ if is_torch_available() and is_transformers_available():
    from .latent_diffusion import LDMTextToImagePipeline
    from .stable_diffusion import (
        CycleDiffusionPipeline,
        StableDiffusionImageVariationPipeline,
        StableDiffusionImg2ImgPipeline,
        StableDiffusionInpaintPipeline,
        StableDiffusionInpaintPipelineLegacy,
...
@@ -30,6 +30,7 @@ class StableDiffusionPipelineOutput(BaseOutput):
if is_transformers_available() and is_torch_available():
    from .pipeline_cycle_diffusion import CycleDiffusionPipeline
    from .pipeline_stable_diffusion import StableDiffusionPipeline
    from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
    from .pipeline_stable_diffusion_img2img import StableDiffusionImg2ImgPipeline
    from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
    from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
...
@@ -64,6 +64,21 @@ class LDMTextToImagePipeline(metaclass=DummyObject):
        requires_backends(cls, ["torch", "transformers"])


class StableDiffusionImageVariationPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch", "transformers"])


class StableDiffusionImg2ImgPipeline(metaclass=DummyObject):
    _backends = ["torch", "transformers"]
...
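
The dummy class above is what `from diffusers import StableDiffusionImageVariationPipeline` resolves to when torch or transformers is missing: the import itself succeeds, and any use fails with an instructive error. Roughly (a sketch assuming an environment without the required backends; the checkpoint id is hypothetical):

from diffusers import StableDiffusionImageVariationPipeline

try:
    # Any constructor or from_pretrained call on the dummy raises via requires_backends.
    StableDiffusionImageVariationPipeline.from_pretrained("some/hypothetical-checkpoint")
except ImportError as err:
    print(err)  # tells the user which backends to pip install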
# coding=utf-8
# Copyright 2022 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import gc
import random
import unittest

import numpy as np
import torch

from diffusers import (
    AutoencoderKL,
    LMSDiscreteScheduler,
    PNDMScheduler,
    StableDiffusionImageVariationPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
from transformers import CLIPVisionConfig, CLIPVisionModelWithProjection

from ...test_pipelines_common import PipelineTesterMixin


# disable TF32 matmuls so numerical results stay reproducible across runs
torch.backends.cuda.matmul.allow_tf32 = False
class StableDiffusionImageVariationPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            image_size=32,
            patch_size=4,
        )
        return CLIPVisionModelWithProjection(config)
    @property
    def dummy_extractor(self):
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    # Tensor.to is not in-place; keep the moved tensor
                    self.pixel_values = self.pixel_values.to(device)
                    return self

            return Out()

        return extract
    def test_stable_diffusion_img_variation_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        image_encoder = self.dummy_image_encoder
        init_image = self.dummy_image.to(device)

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionImageVariationPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            image_encoder=image_encoder,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            init_image,
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            init_image,
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.4935, 0.4784, 0.4802, 0.5027, 0.4805, 0.5149, 0.5143, 0.4879, 0.4731])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_img_variation_multiple_images(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        image_encoder = self.dummy_image_encoder
        init_image = self.dummy_image.to(device).repeat(2, 1, 1, 1)

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionImageVariationPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            image_encoder=image_encoder,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            init_image,
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images

        image_slice = image[-1, -3:, -3:, -1]

        assert image.shape == (2, 128, 128, 3)
        expected_slice = np.array([0.4939, 0.4627, 0.4831, 0.5710, 0.5387, 0.4428, 0.5230, 0.5545, 0.4586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_stable_diffusion_img_variation_num_images_per_prompt(self):
        device = "cpu"
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        image_encoder = self.dummy_image_encoder
        init_image = self.dummy_image.to(device)

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionImageVariationPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            image_encoder=image_encoder,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        # test num_images_per_prompt=1 (default)
        images = sd_pipe(
            init_image,
            num_inference_steps=2,
            output_type="np",
        ).images

        assert images.shape == (1, 128, 128, 3)

        # test num_images_per_prompt=1 (default) for batch of images
        batch_size = 2
        images = sd_pipe(
            init_image.repeat(batch_size, 1, 1, 1),
            num_inference_steps=2,
            output_type="np",
        ).images

        assert images.shape == (batch_size, 128, 128, 3)

        # test num_images_per_prompt for single prompt
        num_images_per_prompt = 2
        images = sd_pipe(
            init_image,
            num_inference_steps=2,
            output_type="np",
            num_images_per_prompt=num_images_per_prompt,
        ).images

        assert images.shape == (num_images_per_prompt, 128, 128, 3)

        # test num_images_per_prompt for batch of prompts
        batch_size = 2
        images = sd_pipe(
            init_image.repeat(batch_size, 1, 1, 1),
            num_inference_steps=2,
            output_type="np",
            num_images_per_prompt=num_images_per_prompt,
        ).images

        assert images.shape == (batch_size * num_images_per_prompt, 128, 128, 3)
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img_variation_fp16(self):
        """Test that stable diffusion image variation works with fp16"""
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        image_encoder = self.dummy_image_encoder
        init_image = self.dummy_image.to(torch_device).float()

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        image_encoder = image_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionImageVariationPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            image_encoder=image_encoder,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        image = sd_pipe(
            init_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        ).images

        assert image.shape == (1, 128, 128, 3)
@slow
@require_torch_gpu
class StableDiffusionImageVariationPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img_variation_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/vermeer.jpg"
        )
        init_image = init_image.resize((512, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/vermeer.npy"
        )

        model_id = "fusing/sd-image-variations-diffusers"
        pipe = StableDiffusionImageVariationPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        generator = torch.Generator(device=torch_device).manual_seed(0)
        output = pipe(
            init_image,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        # generations are flaky across GPUs even in fp32, so compare against a
        # loose max-absolute-error tolerance
        assert np.abs(expected_image - image).max() < 1e-3
    def test_stable_diffusion_img_variation_intermediate_state(self):
        number_of_steps = 0

        def test_callback_fn(step: int, timestep: int, latents: torch.FloatTensor) -> None:
            test_callback_fn.has_been_called = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 0:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([1.83, 1.293, -0.09705, 1.256, -2.293, 1.091, -0.0809, -0.65, -2.953])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-3
            elif step == 37:
                latents = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                latents_slice = latents[0, -3:, -3:, -1]
                expected_slice = np.array([2.285, 2.703, 1.969, 0.696, -1.323, 0.9253, -0.5464, -1.521, -2.537])
                assert np.abs(latents_slice.flatten() - expected_slice).max() < 1e-2

        test_callback_fn.has_been_called = False

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((512, 512))

        pipe = StableDiffusionImageVariationPipeline.from_pretrained(
            "fusing/sd-image-variations-diffusers",
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        generator = torch.Generator(device=torch_device).manual_seed(0)
        with torch.autocast(torch_device):
            pipe(
                init_image,
                num_inference_steps=50,
                guidance_scale=7.5,
                generator=generator,
                callback=test_callback_fn,
                callback_steps=1,
            )
        assert test_callback_fn.has_been_called
        assert number_of_steps == 51
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((512, 512))

        model_id = "fusing/sd-image-variations-diffusers"
        lms = LMSDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionImageVariationPipeline.from_pretrained(
            model_id, scheduler=lms, safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device=torch_device).manual_seed(0)
        _ = pipe(
            init_image,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
            num_inference_steps=5,
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.6 GB is allocated
        assert mem_bytes < 2.6 * 10**9
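
For reference, the intermediate-state test above exercises the standard pipeline callback protocol: a callable receiving (step, timestep, latents) is invoked every callback_steps denoising steps. A minimal user-side sketch (the callback name is ours; pipe and init_image as in the usage example near the top):

# Print progress every 10 steps; latents arrive as a (batch, 4, 64, 64) tensor.
def log_progress(step: int, timestep: int, latents: torch.FloatTensor) -> None:
    print(f"step {step} (t={timestep}): latent norm {latents.norm().item():.2f}")

_ = pipe(init_image, num_inference_steps=50, callback=log_progress, callback_steps=10)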