# coding=utf-8
# Copyright 2023 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DEISMultistepScheduler,
    DPMSolverMultistepScheduler,
    EulerDiscreteScheduler,
    StableDiffusionSAGPipeline,
    UNet2DConditionModel,
)
from diffusers.utils.testing_utils import enable_full_determinism, nightly, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


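# Force deterministic torch kernels so the numeric checks below are reproducible across runs.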
enable_full_determinism()


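# Fast tests for StableDiffusionSAGPipeline (Self-Attention Guidance); the dummy components
# below are tiny so a full denoising loop can run on CPU inside the test suite.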
class StableDiffusionSAGPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionSAGPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

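    # Build a miniature UNet / VAE / CLIP text-encoder stack (plus a tiny tokenizer) so the
    # pipeline can be instantiated and run end-to-end without any full-size pretrained checkpoints.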
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(4, 8),
            layers_per_block=2,
            sample_size=8,
            norm_num_groups=1,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=8,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[4, 8],
            norm_num_groups=1,
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=8,
            num_hidden_layers=2,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
            "image_encoder": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
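        # Device-local generators are not supported on `mps`, so fall back to the default CPU-seeded generator there.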
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": ".",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 1.0,
            "sag_scale": 1.0,
            "output_type": "np",
        }
        return inputs

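    # Reuse the shared batched-vs-single-sample consistency test, just with a slightly relaxed tolerance.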
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    @unittest.skip("Not necessary to test here.")
    def test_xformers_attention_forwardGenerator_pass(self):
        pass

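    # SAG should work with DDIM, DEIS, and DPM-Solver multistep schedulers; EulerDiscrete
    # (a Karras-style scheduler) is expected to be rejected with a ValueError.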
    def test_pipeline_different_schedulers(self):
        pipeline = self.pipeline_class(**self.get_dummy_components())
        inputs = self.get_dummy_inputs("cpu")

        expected_image_size = (16, 16, 3)
        for scheduler_cls in [DDIMScheduler, DEISMultistepScheduler, DPMSolverMultistepScheduler]:
            pipeline.scheduler = scheduler_cls.from_config(pipeline.scheduler.config)
            image = pipeline(**inputs).images[0]

            shape = image.shape
            assert shape == expected_image_size

        pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)

        with self.assertRaises(ValueError):
            # Karras schedulers are not supported
            image = pipeline(**inputs).images[0]


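# Integration tests against the real SD v1.4 / v2.1-base checkpoints; they require a CUDA GPU
# and are only run as part of the nightly test suite.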
@nightly
@require_torch_gpu
class StableDiffusionSAGPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_1(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images

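        # Spot-check a 3x3 patch from the bottom-right corner of the last channel against reference values.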
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt], generator=generator, guidance_scale=7.5, sag_scale=1.0, num_inference_steps=20, output_type="np"
        )

        image = output.images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-2

    def test_stable_diffusion_2_non_square(self):
        sag_pipe = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base")
        sag_pipe = sag_pipe.to(torch_device)
        sag_pipe.set_progress_bar_config(disable=None)

        prompt = "."
        generator = torch.manual_seed(0)
        output = sag_pipe(
            [prompt],
            width=768,
            height=512,
            generator=generator,
            guidance_scale=7.5,
            sag_scale=1.0,
            num_inference_steps=20,
            output_type="np",
        )

        image = output.images

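        # Output arrays are NHWC, so height=512 and width=768 appear as shape (1, 512, 768, 3).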
        assert image.shape == (1, 512, 768, 3)