# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn.functional as F
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...schedulers import ConsistencyDecoderScheduler
from ...utils import BaseOutput
from ...utils.accelerate_utils import apply_forward_hook
from ...utils.torch_utils import randn_tensor
from ..attention_processor import (
    ADDED_KV_ATTENTION_PROCESSORS,
    CROSS_ATTENTION_PROCESSORS,
    AttentionProcessor,
    AttnAddedKVProcessor,
    AttnProcessor,
)
from ..modeling_utils import ModelMixin
from ..unets.unet_2d import UNet2DModel
from .vae import DecoderOutput, DiagonalGaussianDistribution, Encoder


@dataclass
class ConsistencyDecoderVAEOutput(BaseOutput):
    """
    Output of the encoding method.

    Args:
        latent_dist (`DiagonalGaussianDistribution`):
            Encoded outputs of `Encoder` represented as the mean and logvar of `DiagonalGaussianDistribution`.
            `DiagonalGaussianDistribution` allows for sampling latents from the distribution.
    """

    latent_dist: "DiagonalGaussianDistribution"


class ConsistencyDecoderVAE(ModelMixin, ConfigMixin):
    r"""
    The consistency decoder used with DALL-E 3.

    Examples:
        ```py
        >>> import torch
        >>> from diffusers import StableDiffusionPipeline, ConsistencyDecoderVAE

        >>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
        >>> pipe = StableDiffusionPipeline.from_pretrained(
        ...     "stable-diffusion-v1-5/stable-diffusion-v1-5", vae=vae, torch_dtype=torch.float16
        ... ).to("cuda")

        >>> image = pipe("horse", generator=torch.manual_seed(0)).images[0]
        >>> image
        ```
    """

    _supports_group_offloading = False

    @register_to_config
    def __init__(
        self,
        scaling_factor: float = 0.18215,
        latent_channels: int = 4,
        sample_size: int = 32,
        encoder_act_fn: str = "silu",
        encoder_block_out_channels: Tuple[int, ...] = (128, 256, 512, 512),
        encoder_double_z: bool = True,
        encoder_down_block_types: Tuple[str, ...] = (
            "DownEncoderBlock2D",
            "DownEncoderBlock2D",
            "DownEncoderBlock2D",
            "DownEncoderBlock2D",
        ),
        encoder_in_channels: int = 3,
        encoder_layers_per_block: int = 2,
        encoder_norm_num_groups: int = 32,
        encoder_out_channels: int = 4,
        decoder_add_attention: bool = False,
        decoder_block_out_channels: Tuple[int, ...] = (320, 640, 1024, 1024),
        decoder_down_block_types: Tuple[str, ...] = (
            "ResnetDownsampleBlock2D",
            "ResnetDownsampleBlock2D",
            "ResnetDownsampleBlock2D",
            "ResnetDownsampleBlock2D",
        ),
        decoder_downsample_padding: int = 1,
        decoder_in_channels: int = 7,
        decoder_layers_per_block: int = 3,
        decoder_norm_eps: float = 1e-05,
        decoder_norm_num_groups: int = 32,
        decoder_num_train_timesteps: int = 1024,
        decoder_out_channels: int = 6,
        decoder_resnet_time_scale_shift: str = "scale_shift",
        decoder_time_embedding_type: str = "learned",
        decoder_up_block_types: Tuple[str, ...] = (
            "ResnetUpsampleBlock2D",
            "ResnetUpsampleBlock2D",
            "ResnetUpsampleBlock2D",
            "ResnetUpsampleBlock2D",
        ),
    ):
        super().__init__()
        self.encoder = Encoder(
            act_fn=encoder_act_fn,
            block_out_channels=encoder_block_out_channels,
            double_z=encoder_double_z,
            down_block_types=encoder_down_block_types,
            in_channels=encoder_in_channels,
            layers_per_block=encoder_layers_per_block,
            norm_num_groups=encoder_norm_num_groups,
            out_channels=encoder_out_channels,
        )

        self.decoder_unet = UNet2DModel(
            add_attention=decoder_add_attention,
            block_out_channels=decoder_block_out_channels,
            down_block_types=decoder_down_block_types,
            downsample_padding=decoder_downsample_padding,
            in_channels=decoder_in_channels,
            layers_per_block=decoder_layers_per_block,
            norm_eps=decoder_norm_eps,
            norm_num_groups=decoder_norm_num_groups,
            num_train_timesteps=decoder_num_train_timesteps,
            out_channels=decoder_out_channels,
            resnet_time_scale_shift=decoder_resnet_time_scale_shift,
            time_embedding_type=decoder_time_embedding_type,
            up_block_types=decoder_up_block_types,
        )
        self.decoder_scheduler = ConsistencyDecoderScheduler()
        self.register_to_config(block_out_channels=encoder_block_out_channels)
        self.register_to_config(force_upcast=False)
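        # Per-channel statistics of the scaled latents; `decode` uses them to whiten `z` before running the
        # consistency decoder. They are registered as non-persistent buffers so they follow the module's
        # device/dtype without being stored in the state dict.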
        self.register_buffer(
            "means",
            torch.tensor([0.38862467, 0.02253063, 0.07381133, -0.0171294])[None, :, None, None],
            persistent=False,
        )
        self.register_buffer(
            "stds", torch.tensor([0.9654121, 1.0440036, 0.76147926, 0.77022034])[None, :, None, None], persistent=False
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
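        # The encoder downsamples by a factor of 2 ** (len(block_out_channels) - 1) overall, so the latent
        # tile that corresponds to a sample tile is smaller by exactly that factor.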
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_tiling
    def enable_tiling(self, use_tiling: bool = True):
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and for
        processing larger images.
        """
        self.use_tiling = use_tiling

    # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_tiling
    def disable_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_tiling` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        self.enable_tiling(False)

    # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.enable_slicing
    def enable_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor into slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.use_slicing = True

    # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.disable_slicing
    def disable_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_slicing` was previously enabled, this method will go back to computing
        decoding in one step.
        """
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor()

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_attn_processor
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.

        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnAddedKVProcessor()
        elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnProcessor()
        else:
            raise ValueError(
                f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
            )

        self.set_attn_processor(processor)

    @apply_forward_hook
    def encode(
        self, x: torch.Tensor, return_dict: bool = True
    ) -> Union[ConsistencyDecoderVAEOutput, Tuple[DiagonalGaussianDistribution]]:
        """
        Encode a batch of images into latents.

        Args:
            x (`torch.Tensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`]
                instead of a plain tuple.

        Returns:
                The latent representations of the encoded images. If `return_dict` is True, a
                [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] is returned, otherwise a
                plain `tuple` is returned.
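
        Examples:
            A minimal illustrative sketch (the random input tensor below is only a placeholder for a real image
            batch):

            ```py
            >>> import torch
            >>> from diffusers import ConsistencyDecoderVAE

            >>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
            >>> vae = vae.to("cuda")
            >>> x = torch.randn(1, 3, 256, 256, dtype=torch.float16, device="cuda")  # placeholder image batch
            >>> posterior = vae.encode(x).latent_dist
            >>> latents = posterior.sample()
            ```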
        """
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return ConsistencyDecoderVAEOutput(latent_dist=posterior)

    @apply_forward_hook
    def decode(
        self,
        z: torch.Tensor,
        generator: Optional[torch.Generator] = None,
        return_dict: bool = True,
        num_inference_steps: int = 2,
    ) -> Union[DecoderOutput, Tuple[torch.Tensor]]:
        """
        Decodes the input latent vector `z` using the consistency decoder VAE model.

        Args:
            z (`torch.Tensor`):
                The input latent vector.
            generator (`torch.Generator`, *optional*, defaults to `None`):
                The random number generator used for noise sampling.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
            num_inference_steps (`int`, *optional*, defaults to `2`):
                The number of inference steps to run.

        Returns:
            [`DecoderOutput`] or `tuple`:
                The decoded output. If `return_dict` is `True`, a [`DecoderOutput`] is returned, otherwise a plain
                `tuple` is returned.
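
        Examples:
            A minimal illustrative sketch (the random latent below is only a placeholder for a real encoding):

            ```py
            >>> import torch
            >>> from diffusers import ConsistencyDecoderVAE

            >>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
            >>> vae = vae.to("cuda")
            >>> z = torch.randn(1, 4, 32, 32, dtype=torch.float16, device="cuda")  # placeholder 4-channel latent
            >>> image = vae.decode(z, generator=torch.manual_seed(0), num_inference_steps=2).sample
            ```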

        """
        z = (z * self.config.scaling_factor - self.means) / self.stds

        scale_factor = 2 ** (len(self.config.block_out_channels) - 1)
        z = F.interpolate(z, mode="nearest", scale_factor=scale_factor)

        batch_size, _, height, width = z.shape

        self.decoder_scheduler.set_timesteps(num_inference_steps, device=self.device)

        x_t = self.decoder_scheduler.init_noise_sigma * randn_tensor(
            (batch_size, 3, height, width), generator=generator, dtype=z.dtype, device=z.device
        )

        for t in self.decoder_scheduler.timesteps:
            model_input = torch.concat([self.decoder_scheduler.scale_model_input(x_t, t), z], dim=1)
            model_output = self.decoder_unet(model_input, t).sample[:, :3, :, :]
            prev_sample = self.decoder_scheduler.step(model_output, t, x_t, generator).prev_sample
            x_t = prev_sample

        x_0 = x_t

        if not return_dict:
            return (x_0,)

        return DecoderOutput(sample=x_0)

    # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_v
    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    # Copied from diffusers.models.autoencoders.autoencoder_kl.AutoencoderKL.blend_h
    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> Union[ConsistencyDecoderVAEOutput, Tuple]:
        r"""Encode a batch of images using a tiled encoder.

        When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
        steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding
        differs from non-tiled encoding because the encoder sees each tile separately rather than the image as a
        whole. To avoid tiling artifacts, the tiles overlap and are blended together to form a smooth output. You may
        still see tile-sized changes in the output, but they should be much less noticeable.

        Args:
            x (`torch.Tensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`]
                instead of a plain tuple.

        Returns:
            [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`] or `tuple`:
                If return_dict is True, a [`~models.autoencoders.consistency_decoder_vae.ConsistencyDecoderVAEOutput`]
                is returned, otherwise a plain `tuple` is returned.
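
        Examples:
            A minimal illustrative sketch (the random input tensor below is only a placeholder for a real, large
            image batch):

            ```py
            >>> import torch
            >>> from diffusers import ConsistencyDecoderVAE

            >>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
            >>> vae = vae.to("cuda")
            >>> vae.enable_tiling()
            >>> x = torch.randn(1, 3, 1024, 1024, dtype=torch.float16, device="cuda")  # placeholder large image batch
            >>> posterior = vae.encode(x).latent_dist  # routed through `tiled_encode` because tiling is enabled
            ```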
        """
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into overlapping tiles of size `tile_sample_min_size` and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return ConsistencyDecoderVAEOutput(latent_dist=posterior)

    def forward(
        self,
        sample: torch.Tensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, Tuple[torch.Tensor]]:
        r"""
        Args:
            sample (`torch.Tensor`): Input sample.
            sample_posterior (`bool`, *optional*, defaults to `False`):
                Whether to sample from the posterior.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
            generator (`torch.Generator`, *optional*, defaults to `None`):
                Generator to use for sampling.

        Returns:
            [`DecoderOutput`] or `tuple`:
                If return_dict is True, a [`DecoderOutput`] is returned, otherwise a plain `tuple` is returned.
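
        Examples:
            A minimal illustrative sketch of a full encode-decode round trip (the random input tensor below is only a
            placeholder for a real image batch):

            ```py
            >>> import torch
            >>> from diffusers import ConsistencyDecoderVAE

            >>> vae = ConsistencyDecoderVAE.from_pretrained("openai/consistency-decoder", torch_dtype=torch.float16)
            >>> vae = vae.to("cuda")
            >>> sample = torch.randn(1, 3, 256, 256, dtype=torch.float16, device="cuda")  # placeholder image batch
            >>> reconstruction = vae(sample, sample_posterior=True, generator=torch.manual_seed(0)).sample
            ```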
        """
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z, generator=generator).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)