# Copyright 2025 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...loaders import PeftAdapterMixin
from ...loaders.single_file_model import FromOriginalModelMixin
from ...utils import deprecate
from ...utils.accelerate_utils import apply_forward_hook
from ..attention import AttentionMixin
from ..attention_processor import (
    ADDED_KV_ATTENTION_PROCESSORS,
    CROSS_ATTENTION_PROCESSORS,
    Attention,
    AttnAddedKVProcessor,
    AttnProcessor,
    FusedAttnProcessor2_0,
)
from ..modeling_outputs import AutoencoderKLOutput
from ..modeling_utils import ModelMixin
from .vae import AutoencoderMixin, Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder

from lightop import miopenConvBiasAdd as ConvBiasAdd
from lightop import miopenConvBias as ConvBias

class AutoencoderKL(
    ModelMixin, AttentionMixin, AutoencoderMixin, ConfigMixin, FromOriginalModelMixin, PeftAdapterMixin
):
    r"""
    A VAE model with KL loss for encoding images into latents and decoding latent representations into images.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of block output channels.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        latent_channels (`int`, *optional*, defaults to 4): Number of channels in the latent space.
        sample_size (`int`, *optional*, defaults to `32`): Sample input size.
        scaling_factor (`float`, *optional*, defaults to 0.18215):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
            / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
            Synthesis with Latent Diffusion Models](https://huggingface.co/papers/2112.10752) paper.
        force_upcast (`bool`, *optional*, defaults to `True`):
            If enabled, it forces the VAE to run in `float32` for high image resolution pipelines, such as SD-XL. The
            VAE can be fine-tuned / trained to a lower range without losing too much precision, in which case
            `force_upcast` can be set to `False` - see: https://huggingface.co/madebyollin/sdxl-vae-fp16-fix
        mid_block_add_attention (`bool`, *optional*, defaults to `True`):
            If enabled, the mid_block of the Encoder and Decoder will have attention blocks. If set to `False`, the
            mid_block will only have resnet blocks.
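
    Examples:
        A minimal usage sketch (the checkpoint name below is only illustrative; any repository that stores compatible
        `AutoencoderKL` weights works the same way):

        ```py
        >>> import torch
        >>> from diffusers import AutoencoderKL

        >>> vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
        >>> image = torch.randn(1, 3, 512, 512)  # dummy batch of images in [-1, 1]
        >>> posterior = vae.encode(image).latent_dist
        >>> latents = posterior.sample()
        >>> reconstruction = vae.decode(latents).sample
        ```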
    """

    _supports_gradient_checkpointing = True
    _no_split_modules = ["BasicTransformerBlock", "ResnetBlock2D"]
    _group_offload_block_modules = ["quant_conv", "post_quant_conv", "encoder", "decoder"]

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
        shift_factor: Optional[float] = None,
        latents_mean: Optional[Tuple[float]] = None,
        latents_std: Optional[Tuple[float]] = None,
        force_upcast: bool = True,
        use_quant_conv: bool = True,
        use_post_quant_conv: bool = True,
        mid_block_add_attention: bool = True,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
            mid_block_add_attention=mid_block_add_attention,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
            mid_block_add_attention=mid_block_add_attention,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1) if use_quant_conv else None
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1) if use_post_quant_conv else None

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
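        # Illustrative arithmetic for the two tile sizes above (SD-style values, not the defaults of this class):
        # with sample_size=512 and four entries in block_out_channels, tile_sample_min_size = 512 and
        # tile_latent_min_size = 512 // 2 ** (4 - 1) = 64, i.e. 512x512 pixel tiles map to 64x64 latent tiles.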

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor
    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnAddedKVProcessor()
        elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnProcessor()
        else:
            raise ValueError(
                f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
            )

        self.set_attn_processor(processor)

    def _encode(self, x: torch.Tensor) -> torch.Tensor:
        batch_size, num_channels, height, width = x.shape

        if self.use_tiling and (width > self.tile_sample_min_size or height > self.tile_sample_min_size):
            return self._tiled_encode(x)

        enc = self.encoder(x)
        if self.quant_conv is not None:
            enc = self.quant_conv(enc)

        return enc

    @apply_forward_hook
    def encode(
        self, x: torch.Tensor, return_dict: bool = True
    ) -> Union[AutoencoderKLOutput, Tuple[DiagonalGaussianDistribution]]:
        """
        Encode a batch of images into latents.

        Args:
            x (`torch.Tensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.

        Returns:
                The latent representations of the encoded images. If `return_dict` is True, a
Dhruv Nair's avatar
Dhruv Nair committed
188
                [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain `tuple` is returned.
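
        Examples:
            A minimal sketch of encoding an image tensor and applying the `scaling_factor` described in the class
            docstring (`vae` and `image` are assumed to exist already; diffusion pipelines typically apply this
            scaling before denoising):

            ```py
            >>> posterior = vae.encode(image).latent_dist
            >>> latents = posterior.sample() * vae.config.scaling_factor
            ```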
        """
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self._encode(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self._encode(x)

        posterior = DiagonalGaussianDistribution(h)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        if self.post_quant_conv is not None:
            z = self.post_quant_conv(z)

        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(
        self, z: torch.FloatTensor, return_dict: bool = True, generator=None
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        """
        Decode a batch of images.

        Args:
            z (`torch.Tensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.
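
        Examples:
            A minimal sketch that undoes the `scaling_factor` applied at encode time before decoding (`vae` and
            `latents` are assumed to exist already):

            ```py
            >>> image = vae.decode(latents / vae.config.scaling_factor).sample
            ```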

        """
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
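        # Linearly cross-fade the last `blend_extent` rows of `a` into the first rows of `b` (blend along height).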
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
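        # Linearly cross-fade the last `blend_extent` columns of `a` into the first columns of `b` (blend along width).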
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def _tiled_encode(self, x: torch.Tensor) -> torch.Tensor:
        r"""Encode a batch of images using a tiled encoder.

        When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
        steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding
        differs from non-tiled encoding because each tile is encoded separately. To avoid tiling artifacts, the
        tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
        output, but they should be much less noticeable.

        Args:
            x (`torch.Tensor`): Input batch of images.

        Returns:
            `torch.Tensor`:
                The latent representation of the encoded images.
        """

        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent
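        # Illustrative values (assuming tile_sample_min_size=512, tile_latent_min_size=64, tile_overlap_factor=0.25):
        # overlap_size = 384, blend_extent = 16, row_limit = 48.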

        # Split the image into overlapping tiles (`tile_sample_min_size` pixels per side) and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                if self.config.use_quant_conv:
                    tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        enc = torch.cat(result_rows, dim=2)
        return enc

    def tiled_encode(self, x: torch.Tensor, return_dict: bool = True) -> AutoencoderKLOutput:
        r"""Encode a batch of images using a tiled encoder.

        When this option is enabled, the VAE will split the input tensor into tiles to compute encoding in several
        steps. This is useful to keep memory use constant regardless of image size. The end result of tiled encoding
        differs from non-tiled encoding because each tile is encoded separately. To avoid tiling artifacts, the
        tiles overlap and are blended together to form a smooth output. You may still see tile-sized changes in the
        output, but they should be much less noticeable.

        Args:
            x (`torch.Tensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.autoencoder_kl.AutoencoderKLOutput`] instead of a plain tuple.

        Returns:
            [`~models.autoencoder_kl.AutoencoderKLOutput`] or `tuple`:
                If return_dict is True, a [`~models.autoencoder_kl.AutoencoderKLOutput`] is returned, otherwise a plain
                `tuple` is returned.
        """
        deprecation_message = (
            "The tiled_encode implementation supporting the `return_dict` parameter is deprecated. In the future, the "
            "implementation of this method will be replaced with that of `_tiled_encode` and you will no longer be able "
            "to pass `return_dict`. You will also have to create a `DiagonalGaussianDistribution()` from the returned value."
        )
        deprecate("tiled_encode", "1.0.0", deprecation_message, standard_warn=False)

        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into overlapping tiles (`tile_sample_min_size` pixels per side) and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                if self.config.use_quant_conv:
                    tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.Tensor, return_dict: bool = True) -> Union[DecoderOutput, torch.Tensor]:
        r"""
        Decode a batch of images using a tiled decoder.

        Args:
            z (`torch.Tensor`): Input batch of latent vectors.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.vae.DecoderOutput`] is returned, otherwise a plain `tuple` is
                returned.
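
        Examples:
            Tiled decoding is normally triggered indirectly by enabling tiling on the model (a sketch; `vae` and
            `latents` are assumed to exist already):

            ```py
            >>> vae.enable_tiling()
            >>> image = vae.decode(latents).sample  # uses tiled decoding once the latents exceed the tile size
            ```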
        """
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                if self.config.use_post_quant_conv:
                    tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.Tensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.Tensor]:
        r"""
        Args:
            sample (`torch.Tensor`): Input sample.
            sample_posterior (`bool`, *optional*, defaults to `False`):
                Whether to sample from the posterior.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`DecoderOutput`] instead of a plain tuple.
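
        Examples:
            A minimal sketch of a full encode/decode pass with posterior sampling (`vae` and `sample` are assumed to
            exist already):

            ```py
            >>> out = vae(sample, sample_posterior=True, generator=torch.Generator(device="cpu").manual_seed(0))
            >>> reconstruction = out.sample
            ```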
        """
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections
    def fuse_qkv_projections(self):
        """
        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
        are fused. For cross-attention modules, key and value projection matrices are fused.

        > [!WARNING]
        > This API is 🧪 experimental.
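
        Examples:
            A minimal sketch (`vae` and `latents` are assumed to exist already):

            ```py
            >>> vae.fuse_qkv_projections()
            >>> image = vae.decode(latents).sample
            >>> vae.unfuse_qkv_projections()  # restores the original, unfused attention processors
            ```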
        """
        self.original_attn_processors = None

        for _, attn_processor in self.attn_processors.items():
            if "Added" in str(attn_processor.__class__.__name__):
                raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")

        self.original_attn_processors = self.attn_processors

        for module in self.modules():
            if isinstance(module, Attention):
                module.fuse_projections(fuse=True)

        self.set_attn_processor(FusedAttnProcessor2_0())

    # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
    def unfuse_qkv_projections(self):
        """Disables the fused QKV projection if enabled.

        > [!WARNING]
        > This API is 🧪 experimental.

        """
        if self.original_attn_processors is not None:
            self.set_attn_processor(self.original_attn_processors)