"git@developer.sourcefind.cn:hehl2/torchaudio.git" did not exist on "030646c004e652853628706de350b159f0912da9"
modeling_text_unet.py 77.5 KB
Newer Older
1
from typing import Any, Dict, List, Optional, Tuple, Union
2
3
4
5

import numpy as np
import torch
import torch.nn as nn
6
import torch.nn.functional as F
7
8

from ...configuration_utils import ConfigMixin, register_to_config
9
from ...models import ModelMixin
10
from ...models.activations import get_activation
Patrick von Platen's avatar
Patrick von Platen committed
11
from ...models.attention import Attention
12
13
14
15
16
17
from ...models.attention_processor import (
    AttentionProcessor,
    AttnAddedKVProcessor,
    AttnAddedKVProcessor2_0,
    AttnProcessor,
)
18
from ...models.dual_transformer_2d import DualTransformer2DModel
YiYi Xu's avatar
YiYi Xu committed
19
20
21
22
23
24
25
26
from ...models.embeddings import (
    GaussianFourierProjection,
    TextImageProjection,
    TextImageTimeEmbedding,
    TextTimeEmbedding,
    TimestepEmbedding,
    Timesteps,
)
27
from ...models.transformer_2d import Transformer2DModel
28
from ...models.unet_2d_condition import UNet2DConditionOutput
29
from ...utils import is_torch_version, logging
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_down_block(
    down_block_type,
    num_layers,
    in_channels,
    out_channels,
    temb_channels,
    add_downsample,
    resnet_eps,
    resnet_act_fn,
    attn_num_head_channels,
    resnet_groups=None,
    cross_attention_dim=None,
    downsample_padding=None,
    dual_cross_attention=False,
    use_linear_projection=False,
    only_cross_attention=False,
    upcast_attention=False,
    resnet_time_scale_shift="default",
    resnet_skip_time_act=False,
    resnet_out_scale_factor=1.0,
    cross_attention_norm=None,
):
    down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
    if down_block_type == "DownBlockFlat":
        return DownBlockFlat(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif down_block_type == "CrossAttnDownBlockFlat":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockFlat")
        return CrossAttnDownBlockFlat(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attn_num_head_channels,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    raise ValueError(f"{down_block_type} is not supported.")
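
# Illustrative sketch (argument values are hypothetical, not taken from a real checkpoint):
# `get_down_block` is invoked once per entry of `down_block_types` in
# `UNetFlatConditionModel.__init__` below, roughly like
#
#     block = get_down_block(
#         "CrossAttnDownBlockFlat",
#         num_layers=2,
#         in_channels=320,
#         out_channels=640,
#         temb_channels=1280,
#         add_downsample=True,
#         resnet_eps=1e-5,
#         resnet_act_fn="silu",
#         attn_num_head_channels=8,
#         resnet_groups=32,
#         cross_attention_dim=768,
#         downsample_padding=1,
#     )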


def get_up_block(
    up_block_type,
    num_layers,
    in_channels,
    out_channels,
    prev_output_channel,
    temb_channels,
    add_upsample,
    resnet_eps,
    resnet_act_fn,
    attn_num_head_channels,
    resnet_groups=None,
    cross_attention_dim=None,
    dual_cross_attention=False,
    use_linear_projection=False,
    only_cross_attention=False,
    upcast_attention=False,
    resnet_time_scale_shift="default",
    resnet_skip_time_act=False,
    resnet_out_scale_factor=1.0,
    cross_attention_norm=None,
):
    up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
    if up_block_type == "UpBlockFlat":
        return UpBlockFlat(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "CrossAttnUpBlockFlat":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockFlat")
        return CrossAttnUpBlockFlat(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            attn_num_head_channels=attn_num_head_channels,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    raise ValueError(f"{up_block_type} is not supported.")


# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel with UNet2DConditionModel->UNetFlatConditionModel, nn.Conv2d->LinearMultiDim, Block2D->BlockFlat
class UNetFlatConditionModel(ModelMixin, ConfigMixin):
    r"""
    UNetFlatConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a
    timestep, and returns a sample-shaped output.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for the generic methods the library
    implements for all the models (such as downloading or saving, etc.)

    Parameters:
        sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
            Height and width of input/output sample.
        in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.
        out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.
        center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip the sin to cos in the time embedding.
        freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "CrossAttnDownBlockFlat", "DownBlockFlat")`):
            The tuple of downsample blocks to use.
        mid_block_type (`str`, *optional*, defaults to `"UNetMidBlockFlatCrossAttn"`):
            The mid block type. Choose from `UNetMidBlockFlatCrossAttn` or `UNetMidBlockFlatSimpleCrossAttn`; the mid
            block layer is skipped if `None`.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat", "CrossAttnUpBlockFlat",)`):
            The tuple of upsample blocks to use.
        only_cross_attention (`bool` or `Tuple[bool]`, *optional*, defaults to `False`):
            Whether to include self-attention in the basic transformer blocks, see
            [`~models.attention.BasicTransformerBlock`].
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
            The tuple of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
        downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
        mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
            If `None`, the normalization and activation layers are skipped in post-processing.
        norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
        cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
            The dimension of the cross attention features.
        encoder_hid_dim (`int`, *optional*, defaults to None):
            If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
            dimension to `cross_attention_dim`.
        encoder_hid_dim_type (`str`, *optional*, defaults to None):
            If given, the `encoder_hidden_states` and potentially other embeddings will be down-projected to text
            embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
        attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
        resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
            for resnet blocks, see [`~models.resnet.ResnetBlockFlat`]. Choose from `default` or `scale_shift`.
        class_embed_type (`str`, *optional*, defaults to None):
            The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
            `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
        addition_embed_type (`str`, *optional*, defaults to None):
            Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
            "text". "text" will use the `TextTimeEmbedding` layer.
        num_class_embeds (`int`, *optional*, defaults to None):
            Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
            class conditioning with `class_embed_type` equal to `None`.
        time_embedding_type (`str`, *optional*, defaults to `positional`):
            The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
        time_embedding_dim (`int`, *optional*, defaults to `None`):
            An optional override for the dimension of the projected time embedding.
        time_embedding_act_fn (`str`, *optional*, defaults to `None`):
            Optional activation function to use on the time embeddings, applied only once before they are passed to
            the rest of the unet. Choose from `silu`, `mish`, `gelu`, and `swish`.
        timestep_post_act (`str`, *optional*, defaults to `None`):
            The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
        time_cond_proj_dim (`int`, *optional*, defaults to `None`):
            The dimension of `cond_proj` layer in timestep embedding.
        conv_in_kernel (`int`, *optional*, defaults to `3`): The kernel size of `conv_in` layer.
        conv_out_kernel (`int`, *optional*, defaults to `3`): The kernel size of `conv_out` layer.
        projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
            using the "projection" `class_embed_type`. Required when using the "projection" `class_embed_type`.
        class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
            embeddings with the class embeddings.
        mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
            Whether to use cross attention with the mid block when using the `UNetMidBlockFlatSimpleCrossAttn`. If
            `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is None, the
            `only_cross_attention` value will be used as the value for `mid_block_only_cross_attention`. Else, it will
            default to `False`.
    """

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        sample_size: Optional[int] = None,
        in_channels: int = 4,
        out_channels: int = 4,
        center_input_sample: bool = False,
        flip_sin_to_cos: bool = True,
        freq_shift: int = 0,
        down_block_types: Tuple[str] = (
            "CrossAttnDownBlockFlat",
            "CrossAttnDownBlockFlat",
            "CrossAttnDownBlockFlat",
            "DownBlockFlat",
        ),
        mid_block_type: Optional[str] = "UNetMidBlockFlatCrossAttn",
        up_block_types: Tuple[str] = (
            "UpBlockFlat",
            "CrossAttnUpBlockFlat",
            "CrossAttnUpBlockFlat",
            "CrossAttnUpBlockFlat",
        ),
        only_cross_attention: Union[bool, Tuple[bool]] = False,
        block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
        layers_per_block: Union[int, Tuple[int]] = 2,
        downsample_padding: int = 1,
        mid_block_scale_factor: float = 1,
        act_fn: str = "silu",
        norm_num_groups: Optional[int] = 32,
        norm_eps: float = 1e-5,
        cross_attention_dim: Union[int, Tuple[int]] = 1280,
        encoder_hid_dim: Optional[int] = None,
        encoder_hid_dim_type: Optional[str] = None,
        attention_head_dim: Union[int, Tuple[int]] = 8,
        dual_cross_attention: bool = False,
        use_linear_projection: bool = False,
        class_embed_type: Optional[str] = None,
        addition_embed_type: Optional[str] = None,
        num_class_embeds: Optional[int] = None,
        upcast_attention: bool = False,
        resnet_time_scale_shift: str = "default",
        resnet_skip_time_act: bool = False,
        resnet_out_scale_factor: float = 1.0,
        time_embedding_type: str = "positional",
        time_embedding_dim: Optional[int] = None,
        time_embedding_act_fn: Optional[str] = None,
        timestep_post_act: Optional[str] = None,
        time_cond_proj_dim: Optional[int] = None,
        conv_in_kernel: int = 3,
        conv_out_kernel: int = 3,
        projection_class_embeddings_input_dim: Optional[int] = None,
        class_embeddings_concat: bool = False,
        mid_block_only_cross_attention: Optional[bool] = None,
        cross_attention_norm: Optional[str] = None,
        addition_embed_type_num_heads=64,
    ):
        super().__init__()

        self.sample_size = sample_size

        # Check inputs
        if len(down_block_types) != len(up_block_types):
            raise ValueError(
                "Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`:"
                f" {down_block_types}. `up_block_types`: {up_block_types}."
            )

        if len(block_out_channels) != len(down_block_types):
            raise ValueError(
                "Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`:"
                f" {block_out_channels}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
            raise ValueError(
                "Must provide the same number of `only_cross_attention` as `down_block_types`."
                f" `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
            raise ValueError(
                "Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`:"
                f" {attention_head_dim}. `down_block_types`: {down_block_types}."
            )

        if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
            raise ValueError(
                "Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`:"
                f" {cross_attention_dim}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
            raise ValueError(
                "Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`:"
                f" {layers_per_block}. `down_block_types`: {down_block_types}."
            )

        # input
        conv_in_padding = (conv_in_kernel - 1) // 2
        self.conv_in = LinearMultiDim(
            in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
        )

        # time
        if time_embedding_type == "fourier":
            time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
            if time_embed_dim % 2 != 0:
                raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
            self.time_proj = GaussianFourierProjection(
                time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = time_embed_dim
        elif time_embedding_type == "positional":
            time_embed_dim = time_embedding_dim or block_out_channels[0] * 4

            self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
            timestep_input_dim = block_out_channels[0]
        else:
            raise ValueError(
                f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
            )

        self.time_embedding = TimestepEmbedding(
            timestep_input_dim,
            time_embed_dim,
            act_fn=act_fn,
            post_act_fn=timestep_post_act,
            cond_proj_dim=time_cond_proj_dim,
        )

        if encoder_hid_dim_type is None and encoder_hid_dim is not None:
            encoder_hid_dim_type = "text_proj"
            self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
            logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")

        if encoder_hid_dim is None and encoder_hid_dim_type is not None:
            raise ValueError(
                f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
            )

        if encoder_hid_dim_type == "text_proj":
            self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
        elif encoder_hid_dim_type == "text_image_proj":
            # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much,
            # it is set to `cross_attention_dim` here, as this is exactly the required dimension for the currently
            # only use case, `encoder_hid_dim_type == "text_image_proj"` (Kandinsky 2.1).
            self.encoder_hid_proj = TextImageProjection(
                text_embed_dim=encoder_hid_dim,
                image_embed_dim=cross_attention_dim,
                cross_attention_dim=cross_attention_dim,
            )

        elif encoder_hid_dim_type is not None:
            raise ValueError(
                f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
            )
        else:
            self.encoder_hid_proj = None

        # class embedding
        if class_embed_type is None and num_class_embeds is not None:
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif class_embed_type == "timestep":
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
        elif class_embed_type == "identity":
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        elif class_embed_type == "projection":
            if projection_class_embeddings_input_dim is None:
                raise ValueError(
                    "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
                )
            # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
            # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
            # 2. it projects from an arbitrary input dimension.
            #
            # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
            # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
            # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
            self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
        elif class_embed_type == "simple_projection":
            if projection_class_embeddings_input_dim is None:
                raise ValueError(
                    "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
                )
            self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
        else:
            self.class_embedding = None

        if addition_embed_type == "text":
            if encoder_hid_dim is not None:
                text_time_embedding_from_dim = encoder_hid_dim
            else:
                text_time_embedding_from_dim = cross_attention_dim

            self.add_embedding = TextTimeEmbedding(
                text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
            )
        elif addition_embed_type == "text_image":
            # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__
            # too much, they are set to `cross_attention_dim` here, as this is exactly the required dimension for the
            # currently only use case, `addition_embed_type == "text_image"` (Kandinsky 2.1).
            self.add_embedding = TextImageTimeEmbedding(
                text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
            )
        elif addition_embed_type is not None:
            raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")

        if time_embedding_act_fn is None:
            self.time_embed_act = None
        else:
            self.time_embed_act = get_activation(time_embedding_act_fn)

        self.down_blocks = nn.ModuleList([])
        self.up_blocks = nn.ModuleList([])

        if isinstance(only_cross_attention, bool):
            if mid_block_only_cross_attention is None:
                mid_block_only_cross_attention = only_cross_attention

            only_cross_attention = [only_cross_attention] * len(down_block_types)

        if mid_block_only_cross_attention is None:
            mid_block_only_cross_attention = False

        if isinstance(attention_head_dim, int):
            attention_head_dim = (attention_head_dim,) * len(down_block_types)

        if isinstance(cross_attention_dim, int):
            cross_attention_dim = (cross_attention_dim,) * len(down_block_types)

        if isinstance(layers_per_block, int):
            layers_per_block = [layers_per_block] * len(down_block_types)

        if class_embeddings_concat:
            # The time embeddings are concatenated with the class embeddings. The dimension of the
            # time embeddings passed to the down, middle, and up blocks is twice the dimension of the
            # regular time embeddings
            blocks_time_embed_dim = time_embed_dim * 2
        else:
            blocks_time_embed_dim = time_embed_dim

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block[i],
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=blocks_time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim[i],
                attn_num_head_channels=attention_head_dim[i],
                downsample_padding=downsample_padding,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
                resnet_skip_time_act=resnet_skip_time_act,
                resnet_out_scale_factor=resnet_out_scale_factor,
                cross_attention_norm=cross_attention_norm,
            )
            self.down_blocks.append(down_block)

        # mid
        if mid_block_type == "UNetMidBlockFlatCrossAttn":
            self.mid_block = UNetMidBlockFlatCrossAttn(
                in_channels=block_out_channels[-1],
                temb_channels=blocks_time_embed_dim,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                output_scale_factor=mid_block_scale_factor,
                resnet_time_scale_shift=resnet_time_scale_shift,
                cross_attention_dim=cross_attention_dim[-1],
                attn_num_head_channels=attention_head_dim[-1],
                resnet_groups=norm_num_groups,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                upcast_attention=upcast_attention,
            )
        elif mid_block_type == "UNetMidBlockFlatSimpleCrossAttn":
            self.mid_block = UNetMidBlockFlatSimpleCrossAttn(
                in_channels=block_out_channels[-1],
                temb_channels=blocks_time_embed_dim,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                output_scale_factor=mid_block_scale_factor,
                cross_attention_dim=cross_attention_dim[-1],
                attn_num_head_channels=attention_head_dim[-1],
                resnet_groups=norm_num_groups,
                resnet_time_scale_shift=resnet_time_scale_shift,
                skip_time_act=resnet_skip_time_act,
                only_cross_attention=mid_block_only_cross_attention,
                cross_attention_norm=cross_attention_norm,
            )
        elif mid_block_type is None:
            self.mid_block = None
        else:
            raise ValueError(f"unknown mid_block_type : {mid_block_type}")

        # count how many layers upsample the images
        self.num_upsamplers = 0

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_attention_head_dim = list(reversed(attention_head_dim))
        reversed_layers_per_block = list(reversed(layers_per_block))
        reversed_cross_attention_dim = list(reversed(cross_attention_dim))
        only_cross_attention = list(reversed(only_cross_attention))

        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            is_final_block = i == len(block_out_channels) - 1

            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            # add upsample block for all BUT final layer
            if not is_final_block:
                add_upsample = True
                self.num_upsamplers += 1
            else:
                add_upsample = False

            up_block = get_up_block(
                up_block_type,
                num_layers=reversed_layers_per_block[i] + 1,
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=blocks_time_embed_dim,
                add_upsample=add_upsample,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=reversed_cross_attention_dim[i],
                attn_num_head_channels=reversed_attention_head_dim[i],
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
                resnet_skip_time_act=resnet_skip_time_act,
                resnet_out_scale_factor=resnet_out_scale_factor,
                cross_attention_norm=cross_attention_norm,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_num_groups is not None:
            self.conv_norm_out = nn.GroupNorm(
                num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
            )

            self.conv_act = get_activation(act_fn)

        else:
            self.conv_norm_out = None
            self.conv_act = None

        conv_out_padding = (conv_out_kernel - 1) // 2
        self.conv_out = LinearMultiDim(
            block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
        )

    @property
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
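
    # Illustrative usage sketch (hypothetical `unet` instance): the property above and
    # `set_attn_processor` below are designed to be used together, e.g.
    #
    #     procs = unet.attn_processors                                    # {"mid_block....processor": <proc>, ...}
    #     unet.set_attn_processor({name: AttnProcessor() for name in procs})
    #
    # or, to apply a single processor instance everywhere:
    #
    #     unet.set_attn_processor(AttnProcessor())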

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Parameters:
            processor (`dict` of `AttentionProcessor` or `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                of **all** `Attention` layers.
            In case `processor` is a dict, the key needs to define the path to the corresponding cross attention
            processor. This is strongly recommended when setting trainable attention processors.

        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        self.set_attn_processor(AttnProcessor())

    def set_attention_slice(self, slice_size):
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module will split the input tensor in slices, to compute attention
        in several steps. This is useful to save some memory in exchange for a small speed decrease.

        Args:
            slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
                When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
                `"max"`, maximum amount of memory will be saved by running only one slice at a time. If a number is
                provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
                must be a multiple of `slice_size`.
        """
        sliceable_head_dims = []

        def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
            if hasattr(module, "set_attention_slice"):
                sliceable_head_dims.append(module.sliceable_head_dim)

            for child in module.children():
                fn_recursive_retrieve_sliceable_dims(child)

        # retrieve number of attention layers
        for module in self.children():
            fn_recursive_retrieve_sliceable_dims(module)

        num_sliceable_layers = len(sliceable_head_dims)

        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = [dim // 2 for dim in sliceable_head_dims]
        elif slice_size == "max":
            # make smallest slice possible
            slice_size = num_sliceable_layers * [1]

        slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size

        if len(slice_size) != len(sliceable_head_dims):
            raise ValueError(
                f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
                f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
            )

        for i in range(len(slice_size)):
            size = slice_size[i]
            dim = sliceable_head_dims[i]
            if size is not None and size > dim:
                raise ValueError(f"size {size} has to be smaller or equal to {dim}.")

        # Recursively walk through all the children.
        # Any children which exposes the set_attention_slice method
        # gets the message
        def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
            if hasattr(module, "set_attention_slice"):
                module.set_attention_slice(slice_size.pop())

            for child in module.children():
                fn_recursive_set_attention_slice(child, slice_size)

        reversed_slice_size = list(reversed(slice_size))
        for module in self.children():
            fn_recursive_set_attention_slice(module, reversed_slice_size)
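
    # Illustrative usage sketch (hypothetical `unet` instance):
    #
    #     unet.set_attention_slice("auto")   # halve every sliceable head dimension
    #     unet.set_attention_slice("max")    # one slice at a time, maximum memory savings
    #     unet.set_attention_slice(2)        # same slice size for every sliceable attention layer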

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (CrossAttnDownBlockFlat, DownBlockFlat, CrossAttnUpBlockFlat, UpBlockFlat)):
            module.gradient_checkpointing = value

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
        down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
        mid_block_additional_residual: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        return_dict: bool = True,
    ) -> Union[UNet2DConditionOutput, Tuple]:
        r"""
        Args:
            sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
            timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
            encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
            encoder_attention_mask (`torch.Tensor`):
                (batch, sequence_length) cross-attention mask, applied to encoder_hidden_states. True = keep, False =
                discard. Mask will be converted into a bias, which adds large negative values to attention scores
                corresponding to "discard" tokens.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
                `self.processor` in
                [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
            added_cond_kwargs (`dict`, *optional*):
                A kwargs dictionary that, if specified, includes additional conditions that can be used for additional
                time embeddings or encoder hidden states projections. See the configurations `encoder_hid_dim_type`
                and `addition_embed_type` for more information.

        Returns:
            [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
            [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. When
            returning a tuple, the first element is the sample tensor.
        """
        # By default samples have to be AT least a multiple of the overall upsampling factor.
        # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
        # However, the upsampling interpolation output size can be forced to fit any upsampling size
        # on the fly if necessary.
        default_overall_up_factor = 2**self.num_upsamplers

        # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
        forward_upsample_size = False
        upsample_size = None

        if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
            logger.info("Forward upsample size to force interpolation output size.")
            forward_upsample_size = True

        # ensure attention_mask is a bias, and give it a singleton query_tokens dimension
        # expects mask of shape:
        #   [batch, key_tokens]
        # adds singleton query_tokens dimension:
        #   [batch,                    1, key_tokens]
        # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
        #   [batch,  heads, query_tokens, key_tokens] (e.g. torch sdp attn)
        #   [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
        if attention_mask is not None:
            # assume that mask is expressed as:
            #   (1 = keep,      0 = discard)
            # convert mask into a bias that can be added to attention scores:
            #       (keep = +0,     discard = -10000.0)
            attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
            attention_mask = attention_mask.unsqueeze(1)
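            # e.g. (illustrative) a mask row [1, 1, 0] becomes the bias row [[0.0, 0.0, -10000.0]],
            # leaving "keep" positions unchanged and pushing "discard" scores towards -inf before the softmax.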

        # convert encoder_attention_mask to a bias the same way we do for attention_mask
        if encoder_attention_mask is not None:
            encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
            encoder_attention_mask = encoder_attention_mask.unsqueeze(1)

        # 0. center input if necessary
        if self.config.center_input_sample:
            sample = 2 * sample - 1.0

        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
            # This would be a good case for the `match` statement (Python 3.10+)
            is_mps = sample.device.type == "mps"
            if isinstance(timestep, float):
                dtype = torch.float32 if is_mps else torch.float64
            else:
                dtype = torch.int32 if is_mps else torch.int64
            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
        elif len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps.expand(sample.shape[0])

        t_emb = self.time_proj(timesteps)

        # `Timesteps` does not contain any weights and will always return f32 tensors
        # but time_embedding might actually be running in fp16. so we need to cast here.
        # there might be better ways to encapsulate this.
        t_emb = t_emb.to(dtype=sample.dtype)

        emb = self.time_embedding(t_emb, timestep_cond)

        if self.class_embedding is not None:
            if class_labels is None:
                raise ValueError("class_labels should be provided when num_class_embeds > 0")

            if self.config.class_embed_type == "timestep":
                class_labels = self.time_proj(class_labels)

                # `Timesteps` does not contain any weights and will always return f32 tensors
                # there might be better ways to encapsulate this.
                class_labels = class_labels.to(dtype=sample.dtype)

            class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)

            if self.config.class_embeddings_concat:
                emb = torch.cat([emb, class_emb], dim=-1)
            else:
                emb = emb + class_emb
862

        if self.config.addition_embed_type == "text":
            aug_emb = self.add_embedding(encoder_hidden_states)
            emb = emb + aug_emb
        elif self.config.addition_embed_type == "text_image":
            # Kandinsky 2.1 - style
            if "image_embeds" not in added_cond_kwargs:
                raise ValueError(
                    f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires"
                    " the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
                )

            image_embs = added_cond_kwargs.get("image_embeds")
            text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)

            aug_emb = self.add_embedding(text_embs, image_embs)
            emb = emb + aug_emb

        if self.time_embed_act is not None:
            emb = self.time_embed_act(emb)

        if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
            encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
        elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
            # Kandinsky 2.1 - style
            if "image_embeds" not in added_cond_kwargs:
                raise ValueError(
                    f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which"
                    " requires the keyword argument `image_embeds` to be passed in  `added_conditions`"
                )

            image_embeds = added_cond_kwargs.get("image_embeds")
            encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)

        # 2. pre-process
        sample = self.conv_in(sample)

        # 3. down
        down_block_res_samples = (sample,)
        for downsample_block in self.down_blocks:
            if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
                sample, res_samples = downsample_block(
                    hidden_states=sample,
                    temb=emb,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                    encoder_attention_mask=encoder_attention_mask,
                )
            else:
                sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

            down_block_res_samples += res_samples

        if down_block_additional_residuals is not None:
            new_down_block_res_samples = ()

            for down_block_res_sample, down_block_additional_residual in zip(
                down_block_res_samples, down_block_additional_residuals
            ):
                down_block_res_sample = down_block_res_sample + down_block_additional_residual
                new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)

            down_block_res_samples = new_down_block_res_samples

        # 4. mid
        if self.mid_block is not None:
            sample = self.mid_block(
                sample,
                emb,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=attention_mask,
                cross_attention_kwargs=cross_attention_kwargs,
                encoder_attention_mask=encoder_attention_mask,
            )

        if mid_block_additional_residual is not None:
            sample = sample + mid_block_additional_residual

        # 5. up
        for i, upsample_block in enumerate(self.up_blocks):
            is_final_block = i == len(self.up_blocks) - 1

            res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
            down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]

            # if we have not reached the final block and need to forward the
            # upsample size, we do it here
            if not is_final_block and forward_upsample_size:
                upsample_size = down_block_res_samples[-1].shape[2:]

            if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
                sample = upsample_block(
                    hidden_states=sample,
                    temb=emb,
                    res_hidden_states_tuple=res_samples,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    upsample_size=upsample_size,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                )
            else:
                sample = upsample_block(
                    hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
                )

        # 6. post-process
        if self.conv_norm_out:
            sample = self.conv_norm_out(sample)
            sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        if not return_dict:
            return (sample,)

        return UNet2DConditionOutput(sample=sample)


class LinearMultiDim(nn.Linear):
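    """
    A linear layer that stands in for the 2D resize layers (`Downsample2D`/`Upsample2D`) in the flat UNet blocks.
    An integer `in_features` is expanded to `[in_features, second_dim, 1]`; the trailing multi-dimensional feature
    shape is flattened, projected with `nn.Linear`, and reshaped back. For example, `LinearMultiDim(320, 640)` maps
    a `(batch, 320, 4, 1)` input to `(batch, 640, 4, 1)`. Extra positional/keyword arguments (e.g. `use_conv`,
    `padding`, `name`) are accepted for API compatibility and ignored.
    """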
    def __init__(self, in_features, out_features=None, second_dim=4, *args, **kwargs):
        in_features = [in_features, second_dim, 1] if isinstance(in_features, int) else list(in_features)
        if out_features is None:
            out_features = in_features
        out_features = [out_features, second_dim, 1] if isinstance(out_features, int) else list(out_features)
        self.in_features_multidim = in_features
        self.out_features_multidim = out_features
        super().__init__(np.array(in_features).prod(), np.array(out_features).prod())

    def forward(self, input_tensor, *args, **kwargs):
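        # Flatten the trailing multi-dim feature axes, apply the linear projection, then restore
        # the configured multi-dimensional output shape; extra args from the 2D API are ignored.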
        shape = input_tensor.shape
        n_dim = len(self.in_features_multidim)
        input_tensor = input_tensor.reshape(*shape[0:-n_dim], self.in_features)
        output_tensor = super().forward(input_tensor)
        output_tensor = output_tensor.view(*shape[0:-n_dim], *self.out_features_multidim)
        return output_tensor


class ResnetBlockFlat(nn.Module):
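    """
    A `ResnetBlock2D` analogue for flattened feature tensors. The trailing multi-dimensional channel shape
    (`[in_channels, second_dim, 1]` for an integer input) is collapsed into a single channel axis and processed
    with GroupNorm, SiLU and 1x1 convolutions, with an optional projection of the time embedding `temb`. For
    example, `ResnetBlockFlat(in_channels=320, temb_channels=1280)` maps a `(batch, 320, 4, 1)` hidden state and
    a `(batch, 1280)` time embedding to a `(batch, 320, 4, 1)` output.
    """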
    def __init__(
        self,
        *,
        in_channels,
        out_channels=None,
        dropout=0.0,
        temb_channels=512,
        groups=32,
        groups_out=None,
        pre_norm=True,
        eps=1e-6,
        time_embedding_norm="default",
        use_in_shortcut=None,
        second_dim=4,
        **kwargs,
    ):
        super().__init__()
        self.pre_norm = pre_norm
        self.pre_norm = True

        in_channels = [in_channels, second_dim, 1] if isinstance(in_channels, int) else list(in_channels)
        self.in_channels_prod = np.array(in_channels).prod()
        self.channels_multidim = in_channels

        if out_channels is not None:
            out_channels = [out_channels, second_dim, 1] if isinstance(out_channels, int) else list(out_channels)
            out_channels_prod = np.array(out_channels).prod()
            self.out_channels_multidim = out_channels
        else:
            out_channels_prod = self.in_channels_prod
            self.out_channels_multidim = self.channels_multidim
        self.time_embedding_norm = time_embedding_norm

        if groups_out is None:
            groups_out = groups

        self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=self.in_channels_prod, eps=eps, affine=True)
        self.conv1 = torch.nn.Conv2d(self.in_channels_prod, out_channels_prod, kernel_size=1, padding=0)

        if temb_channels is not None:
            self.time_emb_proj = torch.nn.Linear(temb_channels, out_channels_prod)
        else:
            self.time_emb_proj = None

        self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels_prod, eps=eps, affine=True)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv2d(out_channels_prod, out_channels_prod, kernel_size=1, padding=0)

        self.nonlinearity = nn.SiLU()

        self.use_in_shortcut = (
            self.in_channels_prod != out_channels_prod if use_in_shortcut is None else use_in_shortcut
        )

        self.conv_shortcut = None
        if self.use_in_shortcut:
            self.conv_shortcut = torch.nn.Conv2d(
                self.in_channels_prod, out_channels_prod, kernel_size=1, stride=1, padding=0
            )

    def forward(self, input_tensor, temb):
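        # Collapse the trailing multi-dim channel shape into a (N, C, 1, 1) tensor so the
        # GroupNorm and 1x1 convolutions below can treat it like a standard feature map.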
        shape = input_tensor.shape
        n_dim = len(self.channels_multidim)
        input_tensor = input_tensor.reshape(*shape[0:-n_dim], self.in_channels_prod, 1, 1)
        input_tensor = input_tensor.view(-1, self.in_channels_prod, 1, 1)

        hidden_states = input_tensor

        hidden_states = self.norm1(hidden_states)
        hidden_states = self.nonlinearity(hidden_states)
        hidden_states = self.conv1(hidden_states)

        if temb is not None:
            temb = self.time_emb_proj(self.nonlinearity(temb))[:, :, None, None]
            hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = self.nonlinearity(hidden_states)

        hidden_states = self.dropout(hidden_states)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            input_tensor = self.conv_shortcut(input_tensor)

        output_tensor = input_tensor + hidden_states

        output_tensor = output_tensor.view(*shape[0:-n_dim], -1)
        output_tensor = output_tensor.view(*shape[0:-n_dim], *self.out_channels_multidim)

        return output_tensor


# Copied from diffusers.models.unet_2d_blocks.DownBlock2D with DownBlock2D->DownBlockFlat, ResnetBlock2D->ResnetBlockFlat, Downsample2D->LinearMultiDim
class DownBlockFlat(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor=1.0,
        add_downsample=True,
        downsample_padding=1,
    ):
        super().__init__()
        resnets = []

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlockFlat(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    LinearMultiDim(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(self, hidden_states, temb=None):
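        # Collect each intermediate hidden state; the surrounding UNet forwards these as
        # skip connections to the matching up block.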
        output_states = ()

        for resnet in self.resnets:
            if self.training and self.gradient_checkpointing:
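                # With gradient checkpointing, recompute the resnet activations during the
                # backward pass instead of storing them, trading compute for memory.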

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                if is_torch_version(">=", "1.11.0"):
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
                    )
                else:
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb
                    )
            else:
                hidden_states = resnet(hidden_states, temb)

            output_states = output_states + (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)

            output_states = output_states + (hidden_states,)

        return hidden_states, output_states


# Copied from diffusers.models.unet_2d_blocks.CrossAttnDownBlock2D with CrossAttnDownBlock2D->CrossAttnDownBlockFlat, ResnetBlock2D->ResnetBlockFlat, Downsample2D->LinearMultiDim
class CrossAttnDownBlockFlat(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attn_num_head_channels=1,
        cross_attention_dim=1280,
        output_scale_factor=1.0,
        downsample_padding=1,
        add_downsample=True,
        dual_cross_attention=False,
        use_linear_projection=False,
        only_cross_attention=False,
        upcast_attention=False,
    ):
        super().__init__()
        resnets = []
        attentions = []

        self.has_cross_attention = True
        self.attn_num_head_channels = attn_num_head_channels

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlockFlat(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            if not dual_cross_attention:
                attentions.append(
                    Transformer2DModel(
                        attn_num_head_channels,
                        out_channels // attn_num_head_channels,
                        in_channels=out_channels,
                        num_layers=1,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        only_cross_attention=only_cross_attention,
                        upcast_attention=upcast_attention,
                    )
                )
            else:
                attentions.append(
                    DualTransformer2DModel(
                        attn_num_head_channels,
                        out_channels // attn_num_head_channels,
                        in_channels=out_channels,
                        num_layers=1,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                    )
                )
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    LinearMultiDim(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(attn, return_dict=False),
                    hidden_states,
                    encoder_hidden_states,
                    None,  # timestep
                    None,  # class_labels
                    cross_attention_kwargs,
                    attention_mask,
                    encoder_attention_mask,
                    **ckpt_kwargs,
                )[0]
            else:
                hidden_states = resnet(hidden_states, temb)
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )[0]

            output_states = output_states + (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)

            output_states = output_states + (hidden_states,)

        return hidden_states, output_states


# Copied from diffusers.models.unet_2d_blocks.UpBlock2D with UpBlock2D->UpBlockFlat, ResnetBlock2D->ResnetBlockFlat, Upsample2D->LinearMultiDim
class UpBlockFlat(nn.Module):
    def __init__(
        self,
        in_channels: int,
        prev_output_channel: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor=1.0,
        add_upsample=True,
    ):
        super().__init__()
        resnets = []

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            resnets.append(
                ResnetBlockFlat(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False

    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                if is_torch_version(">=", "1.11.0"):
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
                    )
                else:
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb
                    )
            else:
                hidden_states = resnet(hidden_states, temb)

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size)

        return hidden_states


# Copied from diffusers.models.unet_2d_blocks.CrossAttnUpBlock2D with CrossAttnUpBlock2D->CrossAttnUpBlockFlat, ResnetBlock2D->ResnetBlockFlat, Upsample2D->LinearMultiDim
class CrossAttnUpBlockFlat(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        prev_output_channel: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attn_num_head_channels=1,
        cross_attention_dim=1280,
        output_scale_factor=1.0,
        add_upsample=True,
        dual_cross_attention=False,
        use_linear_projection=False,
        only_cross_attention=False,
        upcast_attention=False,
    ):
        super().__init__()
        resnets = []
        attentions = []

        self.has_cross_attention = True
        self.attn_num_head_channels = attn_num_head_channels

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            resnets.append(
                ResnetBlockFlat(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            if not dual_cross_attention:
                attentions.append(
                    Transformer2DModel(
                        attn_num_head_channels,
                        out_channels // attn_num_head_channels,
                        in_channels=out_channels,
                        num_layers=1,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        only_cross_attention=only_cross_attention,
                        upcast_attention=upcast_attention,
                    )
                )
            else:
                attentions.append(
                    DualTransformer2DModel(
                        attn_num_head_channels,
                        out_channels // attn_num_head_channels,
                        in_channels=out_channels,
                        num_layers=1,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                    )
                )
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList([LinearMultiDim(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        upsample_size: Optional[int] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(attn, return_dict=False),
                    hidden_states,
                    encoder_hidden_states,
                    None,  # timestep
                    None,  # class_labels
                    cross_attention_kwargs,
                    attention_mask,
                    encoder_attention_mask,
                    **ckpt_kwargs,
                )[0]
            else:
                hidden_states = resnet(hidden_states, temb)
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )[0]

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size)

        return hidden_states


# Copied from diffusers.models.unet_2d_blocks.UNetMidBlock2DCrossAttn with UNetMidBlock2DCrossAttn->UNetMidBlockFlatCrossAttn, ResnetBlock2D->ResnetBlockFlat
class UNetMidBlockFlatCrossAttn(nn.Module):
    def __init__(
        self,
        in_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attn_num_head_channels=1,
        output_scale_factor=1.0,
        cross_attention_dim=1280,
        dual_cross_attention=False,
        use_linear_projection=False,
        upcast_attention=False,
    ):
        super().__init__()

        self.has_cross_attention = True
        self.attn_num_head_channels = attn_num_head_channels
        resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)

        # there is always at least one resnet
        resnets = [
            ResnetBlockFlat(
                in_channels=in_channels,
                out_channels=in_channels,
                temb_channels=temb_channels,
                eps=resnet_eps,
                groups=resnet_groups,
                dropout=dropout,
                time_embedding_norm=resnet_time_scale_shift,
                non_linearity=resnet_act_fn,
                output_scale_factor=output_scale_factor,
                pre_norm=resnet_pre_norm,
            )
        ]
        attentions = []

        for _ in range(num_layers):
            if not dual_cross_attention:
                attentions.append(
                    Transformer2DModel(
                        attn_num_head_channels,
                        in_channels // attn_num_head_channels,
                        in_channels=in_channels,
                        num_layers=1,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        upcast_attention=upcast_attention,
                    )
                )
            else:
                attentions.append(
                    DualTransformer2DModel(
                        attn_num_head_channels,
                        in_channels // attn_num_head_channels,
                        in_channels=in_channels,
                        num_layers=1,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                    )
                )
            resnets.append(
                ResnetBlockFlat(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            hidden_states = attn(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                cross_attention_kwargs=cross_attention_kwargs,
                attention_mask=attention_mask,
                encoder_attention_mask=encoder_attention_mask,
                return_dict=False,
            )[0]
            hidden_states = resnet(hidden_states, temb)

        return hidden_states


# Copied from diffusers.models.unet_2d_blocks.UNetMidBlock2DSimpleCrossAttn with UNetMidBlock2DSimpleCrossAttn->UNetMidBlockFlatSimpleCrossAttn, ResnetBlock2D->ResnetBlockFlat
class UNetMidBlockFlatSimpleCrossAttn(nn.Module):
    def __init__(
        self,
        in_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attn_num_head_channels=1,
        output_scale_factor=1.0,
        cross_attention_dim=1280,
        skip_time_act=False,
        only_cross_attention=False,
        cross_attention_norm=None,
    ):
        super().__init__()

        self.has_cross_attention = True

        self.attn_num_head_channels = attn_num_head_channels
        resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)

        self.num_heads = in_channels // self.attn_num_head_channels

        # there is always at least one resnet
        resnets = [
            ResnetBlockFlat(
                in_channels=in_channels,
                out_channels=in_channels,
                temb_channels=temb_channels,
                eps=resnet_eps,
                groups=resnet_groups,
                dropout=dropout,
                time_embedding_norm=resnet_time_scale_shift,
                non_linearity=resnet_act_fn,
                output_scale_factor=output_scale_factor,
                pre_norm=resnet_pre_norm,
                skip_time_act=skip_time_act,
            )
        ]
        attentions = []

        for _ in range(num_layers):
            processor = (
                AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor()
            )

            attentions.append(
                Attention(
                    query_dim=in_channels,
                    cross_attention_dim=in_channels,
                    heads=self.num_heads,
                    dim_head=attn_num_head_channels,
                    added_kv_proj_dim=cross_attention_dim,
                    norm_num_groups=resnet_groups,
                    bias=True,
                    upcast_softmax=True,
                    only_cross_attention=only_cross_attention,
                    cross_attention_norm=cross_attention_norm,
                    processor=processor,
                )
            )
            resnets.append(
                ResnetBlockFlat(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                    skip_time_act=skip_time_act,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ):
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        if attention_mask is None:
            # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask.
            mask = None if encoder_hidden_states is None else encoder_attention_mask
        else:
            # when attention_mask is defined: we don't even check for encoder_attention_mask.
            # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks.
            # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask.
            #       then we can simplify this whole if/else block to:
            #         mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask
            mask = attention_mask

        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            # attn
            hidden_states = attn(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=mask,
                **cross_attention_kwargs,
            )

            # resnet
            hidden_states = resnet(hidden_states, temb)

        return hidden_states