"src/vscode:/vscode.git/clone" did not exist on "5a6edac087915c7a92f3317067e82c1097b98307"
vae.py 15.5 KB
Newer Older
Patrick von Platen's avatar
Patrick von Platen committed
1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Optional

import numpy as np
import torch
import torch.nn as nn

from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block


@dataclass
class DecoderOutput(BaseOutput):
    """
    Output of decoding method.

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Decoded output sample of the model. Output of the last layer of the model.
    """

    sample: torch.FloatTensor


class Encoder(nn.Module):
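    r"""
    The `Encoder` layer of a variational autoencoder: maps an input image to a latent representation via a stack
    of down blocks followed by a mid block.
    """
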
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownEncoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        double_z=True,
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[0],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=self.layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                add_downsample=not is_final_block,
                resnet_eps=1e-6,
                downsample_padding=0,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attn_num_head_channels=None,
                temb_channels=None,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default",
            attn_num_head_channels=None,
            resnet_groups=norm_num_groups,
            temb_channels=None,
        )

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            # torch.utils.checkpoint expects a plain callable, so wrap each
            # block in a closure that simply forwards its inputs
            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(down_block), sample, use_reentrant=False
                    )
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, use_reentrant=False
                )
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)

        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)

            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
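
# Usage sketch (illustrative only, not part of the module): with a
# Stable-Diffusion-style config, each non-final down block halves the spatial
# resolution, and `double_z=True` doubles the output channels so the result can
# be split into the mean and log-variance of a latent Gaussian:
#
#   encoder = Encoder(
#       in_channels=3,
#       out_channels=4,
#       down_block_types=("DownEncoderBlock2D",) * 4,
#       block_out_channels=(128, 256, 512, 512),
#       layers_per_block=2,
#       double_z=True,
#   )
#   moments = encoder(torch.randn(1, 3, 256, 256))  # -> (1, 8, 32, 32)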


class Decoder(nn.Module):
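    r"""
    The `Decoder` layer of a variational autoencoder: maps a latent representation back to an output sample via a
    mid block followed by a stack of up blocks.
    """
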
    def __init__(
        self,
        in_channels=3,
        out_channels=3,
        up_block_types=("UpDecoderBlock2D",),
        block_out_channels=(64,),
        layers_per_block=2,
        norm_num_groups=32,
        act_fn="silu",
        norm_type="group",  # group, spatial
    ):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(
            in_channels,
            block_out_channels[-1],
            kernel_size=3,
            stride=1,
            padding=1,
        )

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(
            in_channels=block_out_channels[-1],
            resnet_eps=1e-6,
            resnet_act_fn=act_fn,
            output_scale_factor=1,
            resnet_time_scale_shift="default" if norm_type == "group" else norm_type,
            attn_num_head_channels=None,
            resnet_groups=norm_num_groups,
            temb_channels=temb_channels,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=self.layers_per_block + 1,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                prev_output_channel=None,
                add_upsample=not is_final_block,
                resnet_eps=1e-6,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                attn_num_head_channels=None,
                temb_channels=temb_channels,
                resnet_time_scale_shift=norm_type,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        # the up blocks may run in a different dtype than the mid block (e.g. kept
        # in float32 for numerical stability), so cast the sample to match them
        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False
                    )
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(self.mid_block), sample, latent_embeds
                )
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
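
# Usage sketch (illustrative only): mirroring the `Encoder` example above, each
# non-final up block doubles the spatial resolution:
#
#   decoder = Decoder(
#       in_channels=4,
#       out_channels=3,
#       up_block_types=("UpDecoderBlock2D",) * 4,
#       block_out_channels=(128, 256, 512, 512),
#       layers_per_block=2,
#   )
#   image = decoder(torch.randn(1, 4, 32, 32))  # -> (1, 3, 256, 256)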


class VectorQuantizer(nn.Module):
    """
    Improved version of the original VectorQuantizer that can be used as a drop-in replacement. Mostly avoids
    costly matrix multiplications and allows for post-hoc remapping of indices.
    """

    # NOTE: due to a bug, the beta term was originally applied to the wrong loss
    # term. For backwards compatibility the buggy version is kept as the default;
    # pass legacy=False to use the corrected loss.
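    #
    # Concretely (sg[.] = stop-gradient, i.e. `.detach()`):
    #   legacy=True  (default): loss = mean((sg[z_q] - z)**2) + beta * mean((z_q - sg[z])**2)
    #   legacy=False:           loss = beta * mean((sg[z_q] - z)**2) + mean((z_q - sg[z])**2)
    # The standard VQ-VAE loss weights the commitment term by beta, i.e. legacy=False.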
    def __init__(
        self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True
    ):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer("used", torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices."
            )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to the codebook embeddings e_j, i.e. ||z - e_j||;
        # torch.cdist computes the pairwise distances without materializing the
        # expansion z^2 + e^2 - 2*e*z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute the codebook/commitment loss for the embedding; see the NOTE
        # above __init__ for how `legacy` changes where beta is applied
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
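
# Usage sketch (illustrative only): quantize a batch of latents against a
# codebook of 512 entries of dimension 4; `indices` are the codebook ids:
#
#   vq = VectorQuantizer(n_e=512, vq_embed_dim=4, beta=0.25)
#   z_q, loss, (_, _, indices) = vq(torch.randn(1, 4, 8, 8))
#   z_q.shape  # (1, 4, 8, 8); gradients flow to z via the straight-through estimator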


class DiagonalGaussianDistribution:
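    """
    A Gaussian distribution with a diagonal covariance matrix, parameterized by a tensor that concatenates the
    mean and log-variance along the channel dimension (as produced by an `Encoder` with `double_z=True`).
    """
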
    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype
            )

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype
        )
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar,
                    dim=[1, 2, 3],
                )

    def nll(self, sample, dims=(1, 2, 3)):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
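
# Usage sketch (illustrative only): `parameters` concatenates mean and
# log-variance along the channel axis, e.g. the output of an `Encoder` with
# `double_z=True`:
#
#   posterior = DiagonalGaussianDistribution(torch.randn(1, 8, 32, 32))
#   z = posterior.sample()   # reparameterized sample, shape (1, 4, 32, 32)
#   kl = posterior.kl()      # KL divergence to a standard normal, shape (1,)
#   mode = posterior.mode()  # the distribution's mean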