"vscode:/vscode.git/clone" did not exist on "d8def1ff9432ef60d1067e5e6dde0d700dd95021"
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from ..utils.accelerate_utils import apply_forward_hook
from .autoencoders.vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
from .modeling_utils import ModelMixin


@dataclass
class VQEncoderOutput(BaseOutput):
    """
    Output of VQModel encoding method.

    Args:
        latents (`torch.Tensor` of shape `(batch_size, num_channels, height, width)`):
            The encoded output sample from the last layer of the model.
    """

    latents: torch.Tensor


class VQModel(ModelMixin, ConfigMixin):
    r"""
    A VQ-VAE model for encoding inputs and decoding latent representations.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        in_channels (`int`, *optional*, defaults to 3): Number of channels in the input image.
        out_channels (`int`, *optional*, defaults to 3): Number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("DownEncoderBlock2D",)`):
            Tuple of downsample block types.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpDecoderBlock2D",)`):
            Tuple of upsample block types.
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(64,)`):
            Tuple of block output channels.
        layers_per_block (`int`, *optional*, defaults to `1`): Number of layers per block.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        latent_channels (`int`, *optional*, defaults to `3`): Number of channels in the latent space.
        sample_size (`int`, *optional*, defaults to `32`): Sample input size.
        num_vq_embeddings (`int`, *optional*, defaults to `256`): Number of codebook vectors in the VQ-VAE.
        norm_num_groups (`int`, *optional*, defaults to `32`): Number of groups for normalization layers.
        vq_embed_dim (`int`, *optional*): Hidden dim of codebook vectors in the VQ-VAE.
        scaling_factor (`float`, *optional*, defaults to `0.18215`):
            The component-wise standard deviation of the trained latent space computed using the first batch of the
            training set. This is used to scale the latent space to have unit variance when training the diffusion
            model. The latents are scaled with the formula `z = z * scaling_factor` before being passed to the
            diffusion model. When decoding, the latents are scaled back to the original scale with the formula: `z = 1
            / scaling_factor * z`. For more details, refer to sections 4.3.2 and D.1 of the [High-Resolution Image
            Synthesis with Latent Diffusion Models](https://arxiv.org/abs/2112.10752) paper.
        norm_type (`str`, *optional*, defaults to `"group"`):
            Type of normalization layer to use. Can be one of `"group"` or `"spatial"`.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str, ...] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str, ...] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int, ...] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",  # group, spatial
        mid_block_add_attention: bool = True,
        lookup_from_codebook: bool = False,
        force_upcast: bool = False,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
            mid_block_add_attention=mid_block_add_attention,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

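        # 1x1 convolutions map between the encoder's latent channels and the codebook dimension (vq_embed_dim).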
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
            mid_block_add_attention=mid_block_add_attention,
        )

    @apply_forward_hook
    def encode(self, x: torch.Tensor, return_dict: bool = True) -> Union[VQEncoderOutput, Tuple[torch.Tensor, ...]]:
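        r"""
        Encode a batch of images into latents.

        Args:
            x (`torch.Tensor`): Input batch of images.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.vq_model.VQEncoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.vq_model.VQEncoderOutput`] or `tuple`:
                The latents produced by the encoder and `quant_conv`. Quantization against the codebook is deferred
                to `decode`.
        """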
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.Tensor, force_not_quantize: bool = False, return_dict: bool = True, shape=None
    ) -> Union[DecoderOutput, Tuple[torch.Tensor, ...]]:
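        r"""
        Decode latents into an image, quantizing them against the codebook unless `force_not_quantize` is set.

        Args:
            h (`torch.Tensor`): Latents to decode.
            force_not_quantize (`bool`, *optional*, defaults to `False`):
                If `True`, skip vector quantization and decode `h` directly (or look `h` up as codebook indices
                using `shape` when `config.lookup_from_codebook` is enabled).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.autoencoders.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.autoencoders.vae.DecoderOutput`] or `tuple`:
                The decoded sample and the commitment loss.
        """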
        # run the latents through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant, commit_loss, _ = self.quantize(h)
        elif self.config.lookup_from_codebook:
            quant = self.quantize.get_codebook_entry(h, shape)
            commit_loss = torch.zeros((h.shape[0])).to(h.device, dtype=h.dtype)
        else:
            quant = h
            commit_loss = torch.zeros((h.shape[0])).to(h.device, dtype=h.dtype)
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return dec, commit_loss

        return DecoderOutput(sample=dec, commit_loss=commit_loss)

    def forward(
        self, sample: torch.Tensor, return_dict: bool = True
    ) -> Union[DecoderOutput, Tuple[torch.Tensor, ...]]:
        r"""
        The [`VQModel`] forward method.

        Args:
            sample (`torch.Tensor`): Input sample.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~models.autoencoders.vae.DecoderOutput`] instead of a plain tuple.

        Returns:
            [`~models.autoencoders.vae.DecoderOutput`] or `tuple`:
                If return_dict is True, a [`~models.autoencoders.vae.DecoderOutput`] is returned, otherwise a plain
                `tuple` is returned.
        """

        h = self.encode(sample).latents
        dec = self.decode(h)

        if not return_dict:
            return dec.sample, dec.commit_loss

        return dec
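

# A minimal round-trip sketch (illustrative only, not part of the module); the shapes and values below are
# assumptions for demonstration and rely on the default configuration above.
#
#     model = VQModel()
#     image = torch.randn(1, 3, 32, 32)
#     latents = model.encode(image).latents            # continuous latents, not yet quantized
#     reconstruction = model.decode(latents).sample    # quantized against the codebook, then decoded
#     output = model(image).sample                     # equivalent encode -> decode round trip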