# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional, Tuple, Union

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn

from ..utils import is_torch_version, logging
from ..utils.torch_utils import apply_freeu
from .activations import get_activation
from .attention import AdaGroupNorm
from .attention_processor import Attention, AttnAddedKVProcessor, AttnAddedKVProcessor2_0
from .dual_transformer_2d import DualTransformer2DModel
from .resnet import Downsample2D, FirDownsample2D, FirUpsample2D, KDownsample2D, KUpsample2D, ResnetBlock2D, Upsample2D
from .transformer_2d import Transformer2DModel


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def get_down_block(
    down_block_type,
    num_layers,
    in_channels,
    out_channels,
    temb_channels,
    add_downsample,
    resnet_eps,
    resnet_act_fn,
    transformer_layers_per_block=1,
    num_attention_heads=None,
    resnet_groups=None,
    cross_attention_dim=None,
    downsample_padding=None,
    dual_cross_attention=False,
    use_linear_projection=False,
    only_cross_attention=False,
    upcast_attention=False,
    resnet_time_scale_shift="default",
    attention_type="default",
    resnet_skip_time_act=False,
    resnet_out_scale_factor=1.0,
    cross_attention_norm=None,
    attention_head_dim=None,
    downsample_type=None,
    dropout=0.0,
):
    # If attn head dim is not defined, we default it to the number of heads
    if attention_head_dim is None:
        logger.warning(
            f"It is recommended to provide `attention_head_dim` when calling `get_down_block`. Defaulting `attention_head_dim` to {num_attention_heads}."
        )
        attention_head_dim = num_attention_heads

    down_block_type = down_block_type[7:] if down_block_type.startswith("UNetRes") else down_block_type
    if down_block_type == "DownBlock2D":
        return DownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            dropout=dropout,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif down_block_type == "ResnetDownsampleBlock2D":
        return ResnetDownsampleBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            dropout=dropout,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
            skip_time_act=resnet_skip_time_act,
            output_scale_factor=resnet_out_scale_factor,
        )
    elif down_block_type == "AttnDownBlock2D":
        if add_downsample is False:
            downsample_type = None
        else:
            downsample_type = downsample_type or "conv"  # default to 'conv'
        return AttnDownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            dropout=dropout,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
            downsample_type=downsample_type,
        )
    elif down_block_type == "CrossAttnDownBlock2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock2D")
        return CrossAttnDownBlock2D(
            num_layers=num_layers,
            transformer_layers_per_block=transformer_layers_per_block,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            dropout=dropout,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
            attention_type=attention_type,
        )
    elif down_block_type == "SimpleCrossAttnDownBlock2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnDownBlock2D")
        return SimpleCrossAttnDownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            dropout=dropout,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
            skip_time_act=resnet_skip_time_act,
            output_scale_factor=resnet_out_scale_factor,
            only_cross_attention=only_cross_attention,
            cross_attention_norm=cross_attention_norm,
        )
    elif down_block_type == "SkipDownBlock2D":
        return SkipDownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            dropout=dropout,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            downsample_padding=downsample_padding,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif down_block_type == "AttnSkipDownBlock2D":
        return AttnSkipDownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            dropout=dropout,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif down_block_type == "DownEncoderBlock2D":
        return DownEncoderBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            dropout=dropout,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif down_block_type == "AttnDownEncoderBlock2D":
        return AttnDownEncoderBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            dropout=dropout,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            downsample_padding=downsample_padding,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif down_block_type == "KDownBlock2D":
        return KDownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            dropout=dropout,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
        )
    elif down_block_type == "KCrossAttnDownBlock2D":
        return KCrossAttnDownBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            dropout=dropout,
            add_downsample=add_downsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            cross_attention_dim=cross_attention_dim,
            attention_head_dim=attention_head_dim,
            add_self_attention=True if not add_downsample else False,
        )
    raise ValueError(f"{down_block_type} does not exist.")

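# Illustrative sketch (not part of the library): `get_down_block` is the factory the UNet uses to
# build its encoder stages from config strings. A call for a cross-attention stage might look like
# the following; the concrete values here are assumptions for illustration, not defaults.
#
#     block = get_down_block(
#         "CrossAttnDownBlock2D",
#         num_layers=2,
#         in_channels=320,
#         out_channels=640,
#         temb_channels=1280,
#         add_downsample=True,
#         resnet_eps=1e-5,
#         resnet_act_fn="silu",
#         num_attention_heads=10,
#         cross_attention_dim=768,
#         attention_head_dim=64,
#     )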

def get_up_block(
    up_block_type,
    num_layers,
    in_channels,
    out_channels,
    prev_output_channel,
    temb_channels,
    add_upsample,
    resnet_eps,
    resnet_act_fn,
    resolution_idx=None,
    transformer_layers_per_block=1,
    num_attention_heads=None,
    resnet_groups=None,
    cross_attention_dim=None,
    dual_cross_attention=False,
    use_linear_projection=False,
    only_cross_attention=False,
    upcast_attention=False,
    resnet_time_scale_shift="default",
    attention_type="default",
    resnet_skip_time_act=False,
    resnet_out_scale_factor=1.0,
    cross_attention_norm=None,
    attention_head_dim=None,
    upsample_type=None,
    dropout=0.0,
):
    # If attn head dim is not defined, we default it to the number of heads
    if attention_head_dim is None:
        logger.warning(
            f"It is recommended to provide `attention_head_dim` when calling `get_up_block`. Defaulting `attention_head_dim` to {num_attention_heads}."
        )
        attention_head_dim = num_attention_heads

    up_block_type = up_block_type[7:] if up_block_type.startswith("UNetRes") else up_block_type
    if up_block_type == "UpBlock2D":
        return UpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            resolution_idx=resolution_idx,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "ResnetUpsampleBlock2D":
        return ResnetUpsampleBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            resolution_idx=resolution_idx,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
            skip_time_act=resnet_skip_time_act,
            output_scale_factor=resnet_out_scale_factor,
        )
    elif up_block_type == "CrossAttnUpBlock2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock2D")
        return CrossAttnUpBlock2D(
            num_layers=num_layers,
            transformer_layers_per_block=transformer_layers_per_block,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            resolution_idx=resolution_idx,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            num_attention_heads=num_attention_heads,
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            only_cross_attention=only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
            attention_type=attention_type,
        )
    elif up_block_type == "SimpleCrossAttnUpBlock2D":
        if cross_attention_dim is None:
            raise ValueError("cross_attention_dim must be specified for SimpleCrossAttnUpBlock2D")
        return SimpleCrossAttnUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            resolution_idx=resolution_idx,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            cross_attention_dim=cross_attention_dim,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
            skip_time_act=resnet_skip_time_act,
            output_scale_factor=resnet_out_scale_factor,
            only_cross_attention=only_cross_attention,
            cross_attention_norm=cross_attention_norm,
        )
    elif up_block_type == "AttnUpBlock2D":
        if add_upsample is False:
            upsample_type = None
        else:
            upsample_type = upsample_type or "conv"  # default to 'conv'

        return AttnUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            resolution_idx=resolution_idx,
            dropout=dropout,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
            upsample_type=upsample_type,
        )
    elif up_block_type == "SkipUpBlock2D":
        return SkipUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            resolution_idx=resolution_idx,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "AttnSkipUpBlock2D":
        return AttnSkipUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            prev_output_channel=prev_output_channel,
            temb_channels=temb_channels,
            resolution_idx=resolution_idx,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
        )
    elif up_block_type == "UpDecoderBlock2D":
        return UpDecoderBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            resolution_idx=resolution_idx,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            resnet_time_scale_shift=resnet_time_scale_shift,
            temb_channels=temb_channels,
        )
    elif up_block_type == "AttnUpDecoderBlock2D":
        return AttnUpDecoderBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            resolution_idx=resolution_idx,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            resnet_groups=resnet_groups,
            attention_head_dim=attention_head_dim,
            resnet_time_scale_shift=resnet_time_scale_shift,
            temb_channels=temb_channels,
        )
    elif up_block_type == "KUpBlock2D":
        return KUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            resolution_idx=resolution_idx,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
        )
    elif up_block_type == "KCrossAttnUpBlock2D":
        return KCrossAttnUpBlock2D(
            num_layers=num_layers,
            in_channels=in_channels,
            out_channels=out_channels,
            temb_channels=temb_channels,
            resolution_idx=resolution_idx,
            dropout=dropout,
            add_upsample=add_upsample,
            resnet_eps=resnet_eps,
            resnet_act_fn=resnet_act_fn,
            cross_attention_dim=cross_attention_dim,
            attention_head_dim=attention_head_dim,
        )

    raise ValueError(f"{up_block_type} does not exist.")

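# Illustrative sketch (not part of the library): `get_up_block` mirrors `get_down_block` for the
# decoder half of the UNet. Note the extra `prev_output_channel` (channels coming from the previous
# up block) and `resolution_idx` arguments. The values below are assumptions for illustration only.
#
#     block = get_up_block(
#         "UpBlock2D",
#         num_layers=3,
#         in_channels=320,
#         out_channels=320,
#         prev_output_channel=640,
#         temb_channels=1280,
#         add_upsample=True,
#         resnet_eps=1e-5,
#         resnet_act_fn="silu",
#         resolution_idx=0,
#     )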

class AutoencoderTinyBlock(nn.Module):
    """
    Tiny Autoencoder block used in [`AutoencoderTiny`]. It is a mini residual module consisting of plain conv + ReLU
    blocks.

    Args:
        in_channels (`int`): The number of input channels.
        out_channels (`int`): The number of output channels.
        act_fn (`str`):
            The activation function to use. Supported values are `"swish"`, `"mish"`, `"gelu"`, and `"relu"`.

    Returns:
        `torch.FloatTensor`: A tensor with the same shape as the input tensor, but with the number of channels equal to
        `out_channels`.
    """

    def __init__(self, in_channels: int, out_channels: int, act_fn: str):
        super().__init__()
        act_fn = get_activation(act_fn)
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1),
            act_fn,
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
            act_fn,
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1),
        )
        self.skip = (
            nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
            if in_channels != out_channels
            else nn.Identity()
        )
        self.fuse = nn.ReLU()

    def forward(self, x):
        return self.fuse(self.conv(x) + self.skip(x))

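# Illustrative sketch (not part of the library): `AutoencoderTinyBlock` keeps the spatial size and
# only changes the channel count, so it can be stacked freely. The shapes below are assumptions.
#
#     block = AutoencoderTinyBlock(in_channels=64, out_channels=128, act_fn="relu")
#     y = block(torch.randn(1, 64, 32, 32))  # -> torch.Size([1, 128, 32, 32])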

class UNetMidBlock2D(nn.Module):
    """
    A 2D UNet mid-block [`UNetMidBlock2D`] with multiple residual blocks and optional attention blocks.

    Args:
        in_channels (`int`): The number of input channels.
        temb_channels (`int`): The number of temporal embedding channels.
        dropout (`float`, *optional*, defaults to 0.0): The dropout rate.
        num_layers (`int`, *optional*, defaults to 1): The number of residual blocks.
        resnet_eps (`float`, *optional*, defaults to 1e-6): The epsilon value for the resnet blocks.
        resnet_time_scale_shift (`str`, *optional*, defaults to `default`):
            The type of normalization to apply to the time embeddings. This can help to improve the performance of the
            model on tasks with long-range temporal dependencies.
        resnet_act_fn (`str`, *optional*, defaults to `swish`): The activation function for the resnet blocks.
        resnet_groups (`int`, *optional*, defaults to 32):
            The number of groups to use in the group normalization layers of the resnet blocks.
        attn_groups (`Optional[int]`, *optional*, defaults to None): The number of groups for the attention blocks.
        resnet_pre_norm (`bool`, *optional*, defaults to `True`):
            Whether to use pre-normalization for the resnet blocks.
        add_attention (`bool`, *optional*, defaults to `True`): Whether to add attention blocks.
        attention_head_dim (`int`, *optional*, defaults to 1):
            Dimension of a single attention head. The number of attention heads is determined based on this value and
            the number of input channels.
        output_scale_factor (`float`, *optional*, defaults to 1.0): The output scale factor.

    Returns:
        `torch.FloatTensor`: The output of the last residual block, which is a tensor of shape `(batch_size,
        in_channels, height, width)`.

    """

    def __init__(
        self,
        in_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",  # default, spatial
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        attn_groups: Optional[int] = None,
        resnet_pre_norm: bool = True,
        add_attention: bool = True,
        attention_head_dim=1,
        output_scale_factor=1.0,
    ):
        super().__init__()
        resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)
        self.add_attention = add_attention

        if attn_groups is None:
            attn_groups = resnet_groups if resnet_time_scale_shift == "default" else None

        # there is always at least one resnet
        resnets = [
            ResnetBlock2D(
                in_channels=in_channels,
                out_channels=in_channels,
                temb_channels=temb_channels,
                eps=resnet_eps,
                groups=resnet_groups,
                dropout=dropout,
                time_embedding_norm=resnet_time_scale_shift,
                non_linearity=resnet_act_fn,
                output_scale_factor=output_scale_factor,
                pre_norm=resnet_pre_norm,
            )
        ]
        attentions = []

        if attention_head_dim is None:
            logger.warning(
                f"It is not recommended to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {in_channels}."
            )
            attention_head_dim = in_channels

        for _ in range(num_layers):
            if self.add_attention:
                attentions.append(
                    Attention(
                        in_channels,
                        heads=in_channels // attention_head_dim,
                        dim_head=attention_head_dim,
                        rescale_output_factor=output_scale_factor,
                        eps=resnet_eps,
                        norm_num_groups=attn_groups,
                        spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None,
                        residual_connection=True,
                        bias=True,
                        upcast_softmax=True,
                        _from_deprecated_attn_block=True,
                    )
                )
            else:
                attentions.append(None)

            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

    def forward(self, hidden_states, temb=None):
        hidden_states = self.resnets[0](hidden_states, temb)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            if attn is not None:
                hidden_states = attn(hidden_states, temb=temb)
            hidden_states = resnet(hidden_states, temb)

        return hidden_states

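# Illustrative sketch (not part of the library): `UNetMidBlock2D` is shape-preserving; its forward
# pass interleaves self-attention (over the flattened spatial positions) with resnets. The values
# below are assumptions for illustration only.
#
#     mid = UNetMidBlock2D(in_channels=512, temb_channels=512, attention_head_dim=64)
#     sample = torch.randn(1, 512, 8, 8)
#     temb = torch.randn(1, 512)
#     out = mid(sample, temb)  # -> torch.Size([1, 512, 8, 8])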

class UNetMidBlock2DCrossAttn(nn.Module):
    def __init__(
        self,
        in_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        transformer_layers_per_block: Union[int, Tuple[int]] = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads=1,
        output_scale_factor=1.0,
        cross_attention_dim=1280,
        dual_cross_attention=False,
        use_linear_projection=False,
        upcast_attention=False,
        attention_type="default",
    ):
        super().__init__()

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads
        resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)

        # support for variable transformer layers per block
        if isinstance(transformer_layers_per_block, int):
            transformer_layers_per_block = [transformer_layers_per_block] * num_layers

        # there is always at least one resnet
        resnets = [
            ResnetBlock2D(
                in_channels=in_channels,
                out_channels=in_channels,
                temb_channels=temb_channels,
                eps=resnet_eps,
                groups=resnet_groups,
                dropout=dropout,
                time_embedding_norm=resnet_time_scale_shift,
                non_linearity=resnet_act_fn,
                output_scale_factor=output_scale_factor,
                pre_norm=resnet_pre_norm,
            )
        ]
        attentions = []

        for i in range(num_layers):
            if not dual_cross_attention:
                attentions.append(
                    Transformer2DModel(
                        num_attention_heads,
                        in_channels // num_attention_heads,
                        in_channels=in_channels,
                        num_layers=transformer_layers_per_block[i],
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        upcast_attention=upcast_attention,
                        attention_type=attention_type,
                    )
                )
            else:
                attentions.append(
                    DualTransformer2DModel(
                        num_attention_heads,
                        in_channels // num_attention_heads,
                        in_channels=in_channels,
                        num_layers=1,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                    )
                )
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
        hidden_states = self.resnets[0](hidden_states, temb, scale=lora_scale)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )[0]
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
            else:
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )[0]
                hidden_states = resnet(hidden_states, temb, scale=lora_scale)

        return hidden_states

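# Illustrative sketch (not part of the library): `UNetMidBlock2DCrossAttn` conditions the mid block
# on text (or other) embeddings via `encoder_hidden_states`. Sizes are assumptions for illustration.
#
#     mid = UNetMidBlock2DCrossAttn(
#         in_channels=1280, temb_channels=1280, num_attention_heads=8, cross_attention_dim=768
#     )
#     out = mid(
#         torch.randn(2, 1280, 8, 8),
#         temb=torch.randn(2, 1280),
#         encoder_hidden_states=torch.randn(2, 77, 768),
#     )  # same spatial shape as the input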

class UNetMidBlock2DSimpleCrossAttn(nn.Module):
    def __init__(
        self,
        in_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attention_head_dim=1,
        output_scale_factor=1.0,
        cross_attention_dim=1280,
        skip_time_act=False,
        only_cross_attention=False,
        cross_attention_norm=None,
    ):
        super().__init__()

        self.has_cross_attention = True

        self.attention_head_dim = attention_head_dim
        resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32)

        self.num_heads = in_channels // self.attention_head_dim

        # there is always at least one resnet
        resnets = [
            ResnetBlock2D(
                in_channels=in_channels,
                out_channels=in_channels,
                temb_channels=temb_channels,
                eps=resnet_eps,
                groups=resnet_groups,
                dropout=dropout,
                time_embedding_norm=resnet_time_scale_shift,
                non_linearity=resnet_act_fn,
                output_scale_factor=output_scale_factor,
                pre_norm=resnet_pre_norm,
                skip_time_act=skip_time_act,
            )
        ]
        attentions = []

        for _ in range(num_layers):
            processor = (
                AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor()
            )

            attentions.append(
                Attention(
                    query_dim=in_channels,
                    cross_attention_dim=in_channels,
                    heads=self.num_heads,
                    dim_head=self.attention_head_dim,
                    added_kv_proj_dim=cross_attention_dim,
                    norm_num_groups=resnet_groups,
                    bias=True,
                    upcast_softmax=True,
                    only_cross_attention=only_cross_attention,
                    cross_attention_norm=cross_attention_norm,
                    processor=processor,
                )
            )
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=in_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                    skip_time_act=skip_time_act,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ):
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        lora_scale = cross_attention_kwargs.get("scale", 1.0)

        if attention_mask is None:
            # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask.
            mask = None if encoder_hidden_states is None else encoder_attention_mask
        else:
            # when attention_mask is defined: we don't even check for encoder_attention_mask.
            # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks.
            # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask.
            #       then we can simplify this whole if/else block to:
            #         mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask
            mask = attention_mask

        hidden_states = self.resnets[0](hidden_states, temb, scale=lora_scale)
        for attn, resnet in zip(self.attentions, self.resnets[1:]):
            # attn
            hidden_states = attn(
                hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=mask,
                **cross_attention_kwargs,
            )

            # resnet
            hidden_states = resnet(hidden_states, temb, scale=lora_scale)

        return hidden_states

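# Illustrative note (not part of the library): `UNetMidBlock2DSimpleCrossAttn` injects conditioning
# through added key/value projections (`AttnAddedKVProcessor`) instead of a `Transformer2DModel`.
# Note the mask precedence in `forward`: `attention_mask` wins; `encoder_attention_mask` is only
# used when `attention_mask` is None. Sizes below are assumptions for illustration.
#
#     mid = UNetMidBlock2DSimpleCrossAttn(
#         in_channels=1024, temb_channels=1024, attention_head_dim=64, cross_attention_dim=768
#     )
#     out = mid(
#         torch.randn(1, 1024, 8, 8),
#         temb=torch.randn(1, 1024),
#         encoder_hidden_states=torch.randn(1, 77, 768),
#     )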

class AttnDownBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attention_head_dim=1,
        output_scale_factor=1.0,
        downsample_padding=1,
        downsample_type="conv",
    ):
        super().__init__()
        resnets = []
        attentions = []
        self.downsample_type = downsample_type

        if attention_head_dim is None:
            logger.warning(
                f"It is not recommended to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}."
            )
            attention_head_dim = out_channels

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            attentions.append(
                Attention(
                    out_channels,
                    heads=out_channels // attention_head_dim,
                    dim_head=attention_head_dim,
                    rescale_output_factor=output_scale_factor,
                    eps=resnet_eps,
                    norm_num_groups=resnet_groups,
                    residual_connection=True,
                    bias=True,
                    upcast_softmax=True,
                    _from_deprecated_attn_block=True,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if downsample_type == "conv":
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
                    )
                ]
            )
        elif downsample_type == "resnet":
            self.downsamplers = nn.ModuleList(
                [
                    ResnetBlock2D(
                        in_channels=out_channels,
                        out_channels=out_channels,
                        temb_channels=temb_channels,
                        eps=resnet_eps,
                        groups=resnet_groups,
                        dropout=dropout,
                        time_embedding_norm=resnet_time_scale_shift,
                        non_linearity=resnet_act_fn,
                        output_scale_factor=output_scale_factor,
                        pre_norm=resnet_pre_norm,
                        down=True,
                    )
                ]
            )
        else:
            self.downsamplers = None

    def forward(self, hidden_states, temb=None, upsample_size=None, cross_attention_kwargs=None):
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        lora_scale = cross_attention_kwargs.get("scale", 1.0)

        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            cross_attention_kwargs.update({"scale": lora_scale})
            hidden_states = resnet(hidden_states, temb, scale=lora_scale)
            hidden_states = attn(hidden_states, **cross_attention_kwargs)
            output_states = output_states + (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                if self.downsample_type == "resnet":
                    hidden_states = downsampler(hidden_states, temb=temb, scale=lora_scale)
                else:
                    hidden_states = downsampler(hidden_states, scale=lora_scale)

            output_states += (hidden_states,)

        return hidden_states, output_states

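# Illustrative note (not part of the library): down blocks return a tuple
# `(hidden_states, output_states)`; `output_states` collects the intermediate feature maps that the
# UNet later feeds to the matching up block as skip connections. For `AttnDownBlock2D`,
# `downsample_type="conv"` appends a strided-conv `Downsample2D`, `"resnet"` uses a down-sampling
# `ResnetBlock2D`, and `None` skips downsampling entirely. Shapes below are assumptions.
#
#     block = AttnDownBlock2D(in_channels=320, out_channels=640, temb_channels=1280)
#     h, skips = block(torch.randn(1, 320, 32, 32), temb=torch.randn(1, 1280))
#     # h: [1, 640, 16, 16] after the conv downsampler; skips holds one tensor per resnet/attn pair
#     # plus the downsampled output.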

class CrossAttnDownBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        transformer_layers_per_block: Union[int, Tuple[int]] = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads=1,
        cross_attention_dim=1280,
        output_scale_factor=1.0,
        downsample_padding=1,
        add_downsample=True,
        dual_cross_attention=False,
        use_linear_projection=False,
        only_cross_attention=False,
        upcast_attention=False,
        attention_type="default",
    ):
        super().__init__()
        resnets = []
        attentions = []

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads
        if isinstance(transformer_layers_per_block, int):
            transformer_layers_per_block = [transformer_layers_per_block] * num_layers

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            if not dual_cross_attention:
                attentions.append(
                    Transformer2DModel(
                        num_attention_heads,
                        out_channels // num_attention_heads,
                        in_channels=out_channels,
                        num_layers=transformer_layers_per_block[i],
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        only_cross_attention=only_cross_attention,
                        upcast_attention=upcast_attention,
                        attention_type=attention_type,
                    )
                )
            else:
                attentions.append(
                    DualTransformer2DModel(
                        num_attention_heads,
                        out_channels // num_attention_heads,
                        in_channels=out_channels,
                        num_layers=1,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                    )
                )
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        additional_residuals=None,
    ):
        output_states = ()

        lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0

        blocks = list(zip(self.resnets, self.attentions))

        for i, (resnet, attn) in enumerate(blocks):
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )[0]
            else:
                hidden_states = resnet(hidden_states, temb, scale=lora_scale)
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )[0]

            # apply additional residuals to the output of the last pair of resnet and attention blocks
            if i == len(blocks) - 1 and additional_residuals is not None:
                hidden_states = hidden_states + additional_residuals

            output_states = output_states + (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states, scale=lora_scale)

            output_states = output_states + (hidden_states,)

        return hidden_states, output_states

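# Illustrative usage sketch (not part of the library API; the constructor arguments and
# sizes below are assumptions chosen for the example, not values from any shipped config):
#
#     block = CrossAttnDownBlock2D(
#         in_channels=320, out_channels=320, temb_channels=1280,
#         num_attention_heads=8, cross_attention_dim=768,
#     )
#     sample = torch.randn(1, 320, 64, 64)
#     temb = torch.randn(1, 1280)
#     context = torch.randn(1, 77, 768)  # e.g. text-encoder hidden states
#     out, skips = block(sample, temb, encoder_hidden_states=context)
#     # `out` is the downsampled feature map; `skips` holds one tensor per resnet/attention
#     # pair plus the downsampler output, later consumed by the matching up block.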

class DownBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor=1.0,
        add_downsample=True,
        downsample_padding=1,
    ):
        super().__init__()
        resnets = []

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(self, hidden_states, temb=None, scale: float = 1.0):
        output_states = ()

        for resnet in self.resnets:
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                if is_torch_version(">=", "1.11.0"):
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
                    )
                else:
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb
                    )
            else:
                hidden_states = resnet(hidden_states, temb, scale=scale)

            output_states = output_states + (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states, scale=scale)

            output_states = output_states + (hidden_states,)

        return hidden_states, output_states

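# Illustrative usage sketch (hypothetical sizes, not from any shipped config):
#
#     block = DownBlock2D(in_channels=64, out_channels=128, temb_channels=512)
#     sample = torch.randn(1, 64, 32, 32)
#     temb = torch.randn(1, 512)
#     out, skips = block(sample, temb)
#     # With the default `add_downsample=True`, `out` is (1, 128, 16, 16) and `skips`
#     # contains the per-resnet features plus the downsampled output.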

class DownEncoderBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor=1.0,
        add_downsample=True,
        downsample_padding=1,
    ):
        super().__init__()
        resnets = []

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=None,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
                    )
                ]
            )
        else:
            self.downsamplers = None

    def forward(self, hidden_states, scale: float = 1.0):
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb=None, scale=scale)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states, scale)

        return hidden_states

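# Illustrative usage sketch (hypothetical sizes): `DownEncoderBlock2D` takes no time
# embedding, which is why its resnets are built with `temb_channels=None`.
#
#     block = DownEncoderBlock2D(in_channels=128, out_channels=256, num_layers=2)
#     out = block(torch.randn(1, 128, 64, 64))  # -> (1, 256, 32, 32) with the default downsampler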

class AttnDownEncoderBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attention_head_dim=1,
        output_scale_factor=1.0,
        add_downsample=True,
        downsample_padding=1,
    ):
        super().__init__()
        resnets = []
        attentions = []

        if attention_head_dim is None:
            logger.warn(
                f"It is not recommended to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}."
            )
            attention_head_dim = out_channels

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=None,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            attentions.append(
                Attention(
                    out_channels,
                    heads=out_channels // attention_head_dim,
                    dim_head=attention_head_dim,
                    rescale_output_factor=output_scale_factor,
                    eps=resnet_eps,
                    norm_num_groups=resnet_groups,
                    residual_connection=True,
                    bias=True,
                    upcast_softmax=True,
                    _from_deprecated_attn_block=True,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    Downsample2D(
                        out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op"
                    )
                ]
            )
        else:
            self.downsamplers = None

    def forward(self, hidden_states, scale: float = 1.0):
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb=None, scale=scale)
            cross_attention_kwargs = {"scale": scale}
            hidden_states = attn(hidden_states, **cross_attention_kwargs)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states, scale)

        return hidden_states

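# Illustrative usage sketch (hypothetical sizes): same as `DownEncoderBlock2D`, but each
# resnet is followed by a self-attention layer with `out_channels // attention_head_dim` heads.
#
#     block = AttnDownEncoderBlock2D(in_channels=256, out_channels=256, attention_head_dim=64)
#     out = block(torch.randn(1, 256, 32, 32))  # -> (1, 256, 16, 16)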

class AttnSkipDownBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_pre_norm: bool = True,
        attention_head_dim=1,
        output_scale_factor=np.sqrt(2.0),
        add_downsample=True,
    ):
        super().__init__()
        self.attentions = nn.ModuleList([])
        self.resnets = nn.ModuleList([])

        if attention_head_dim is None:
            logger.warn(
                f"It is not recommended to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}."
            )
            attention_head_dim = out_channels

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            self.resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=min(in_channels // 4, 32),
                    groups_out=min(out_channels // 4, 32),
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            self.attentions.append(
                Attention(
                    out_channels,
                    heads=out_channels // attention_head_dim,
                    dim_head=attention_head_dim,
                    rescale_output_factor=output_scale_factor,
                    eps=resnet_eps,
                    norm_num_groups=32,
                    residual_connection=True,
                    bias=True,
                    upcast_softmax=True,
                    _from_deprecated_attn_block=True,
                )
            )

        if add_downsample:
            self.resnet_down = ResnetBlock2D(
                in_channels=out_channels,
                out_channels=out_channels,
                temb_channels=temb_channels,
                eps=resnet_eps,
                groups=min(out_channels // 4, 32),
                dropout=dropout,
                time_embedding_norm=resnet_time_scale_shift,
                non_linearity=resnet_act_fn,
                output_scale_factor=output_scale_factor,
                pre_norm=resnet_pre_norm,
                use_in_shortcut=True,
                down=True,
                kernel="fir",
            )
            self.downsamplers = nn.ModuleList([FirDownsample2D(out_channels, out_channels=out_channels)])
            self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1))
        else:
            self.resnet_down = None
            self.downsamplers = None
            self.skip_conv = None

    def forward(self, hidden_states, temb=None, skip_sample=None, scale: float = 1.0):
        output_states = ()

        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb, scale=scale)
            cross_attention_kwargs = {"scale": scale}
            hidden_states = attn(hidden_states, **cross_attention_kwargs)
            output_states += (hidden_states,)

        if self.downsamplers is not None:
            hidden_states = self.resnet_down(hidden_states, temb, scale=scale)
            for downsampler in self.downsamplers:
                skip_sample = downsampler(skip_sample)

            hidden_states = self.skip_conv(skip_sample) + hidden_states

            output_states += (hidden_states,)

        return hidden_states, output_states, skip_sample

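# Illustrative usage sketch (hypothetical sizes): `skip_sample` is an image-resolution skip
# branch (note `skip_conv` expects 3 input channels); it is FIR-downsampled alongside the
# features and added back in.
#
#     block = AttnSkipDownBlock2D(in_channels=128, out_channels=128, temb_channels=512)
#     x = torch.randn(1, 128, 32, 32)
#     skip = torch.randn(1, 3, 32, 32)
#     out, states, skip = block(x, temb=torch.randn(1, 512), skip_sample=skip)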

class SkipDownBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_pre_norm: bool = True,
        output_scale_factor=np.sqrt(2.0),
        add_downsample=True,
        downsample_padding=1,
    ):
        super().__init__()
        self.resnets = nn.ModuleList([])

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            self.resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=min(in_channels // 4, 32),
                    groups_out=min(out_channels // 4, 32),
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        if add_downsample:
            self.resnet_down = ResnetBlock2D(
                in_channels=out_channels,
                out_channels=out_channels,
                temb_channels=temb_channels,
                eps=resnet_eps,
                groups=min(out_channels // 4, 32),
                dropout=dropout,
                time_embedding_norm=resnet_time_scale_shift,
                non_linearity=resnet_act_fn,
                output_scale_factor=output_scale_factor,
                pre_norm=resnet_pre_norm,
                use_in_shortcut=True,
                down=True,
                kernel="fir",
            )
            self.downsamplers = nn.ModuleList([FirDownsample2D(out_channels, out_channels=out_channels)])
            self.skip_conv = nn.Conv2d(3, out_channels, kernel_size=(1, 1), stride=(1, 1))
        else:
            self.resnet_down = None
            self.downsamplers = None
            self.skip_conv = None

    def forward(self, hidden_states, temb=None, skip_sample=None, scale: float = 1.0):
        output_states = ()

        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb, scale)
            output_states += (hidden_states,)

        if self.downsamplers is not None:
            hidden_states = self.resnet_down(hidden_states, temb, scale)
            for downsampler in self.downsamplers:
                skip_sample = downsampler(skip_sample)

            hidden_states = self.skip_conv(skip_sample) + hidden_states

            output_states += (hidden_states,)

        return hidden_states, output_states, skip_sample

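# Illustrative usage sketch (hypothetical sizes): identical flow to `AttnSkipDownBlock2D`
# minus the attention layers.
#
#     block = SkipDownBlock2D(in_channels=128, out_channels=128, temb_channels=512)
#     out, states, skip = block(
#         torch.randn(1, 128, 32, 32), temb=torch.randn(1, 512), skip_sample=torch.randn(1, 3, 32, 32)
#     )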

class ResnetDownsampleBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor=1.0,
        add_downsample=True,
        skip_time_act=False,
    ):
        super().__init__()
        resnets = []

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                    skip_time_act=skip_time_act,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    ResnetBlock2D(
                        in_channels=out_channels,
                        out_channels=out_channels,
                        temb_channels=temb_channels,
                        eps=resnet_eps,
                        groups=resnet_groups,
                        dropout=dropout,
                        time_embedding_norm=resnet_time_scale_shift,
                        non_linearity=resnet_act_fn,
                        output_scale_factor=output_scale_factor,
                        pre_norm=resnet_pre_norm,
                        skip_time_act=skip_time_act,
                        down=True,
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(self, hidden_states, temb=None, scale: float = 1.0):
        output_states = ()

        for resnet in self.resnets:
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                if is_torch_version(">=", "1.11.0"):
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
                    )
                else:
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb
                    )
            else:
                hidden_states = resnet(hidden_states, temb, scale)

            output_states = output_states + (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states, temb, scale)

            output_states = output_states + (hidden_states,)

        return hidden_states, output_states

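# Illustrative usage sketch (hypothetical sizes): downsampling is done by an extra
# `ResnetBlock2D(down=True)` instead of a strided conv.
#
#     block = ResnetDownsampleBlock2D(in_channels=64, out_channels=128, temb_channels=512)
#     out, skips = block(torch.randn(1, 64, 32, 32), temb=torch.randn(1, 512))  # out: (1, 128, 16, 16)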

class SimpleCrossAttnDownBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attention_head_dim=1,
        cross_attention_dim=1280,
        output_scale_factor=1.0,
        add_downsample=True,
        skip_time_act=False,
        only_cross_attention=False,
        cross_attention_norm=None,
    ):
        super().__init__()

        self.has_cross_attention = True

        resnets = []
        attentions = []

        self.attention_head_dim = attention_head_dim
        self.num_heads = out_channels // self.attention_head_dim

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                    skip_time_act=skip_time_act,
                )
            )

            processor = (
                AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor()
            )

            attentions.append(
                Attention(
                    query_dim=out_channels,
                    cross_attention_dim=out_channels,
                    heads=self.num_heads,
                    dim_head=attention_head_dim,
                    added_kv_proj_dim=cross_attention_dim,
                    norm_num_groups=resnet_groups,
                    bias=True,
                    upcast_softmax=True,
                    only_cross_attention=only_cross_attention,
                    cross_attention_norm=cross_attention_norm,
                    processor=processor,
                )
            )
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            self.downsamplers = nn.ModuleList(
                [
                    ResnetBlock2D(
                        in_channels=out_channels,
                        out_channels=out_channels,
                        temb_channels=temb_channels,
                        eps=resnet_eps,
                        groups=resnet_groups,
                        dropout=dropout,
                        time_embedding_norm=resnet_time_scale_shift,
                        non_linearity=resnet_act_fn,
                        output_scale_factor=output_scale_factor,
                        pre_norm=resnet_pre_norm,
                        skip_time_act=skip_time_act,
                        down=True,
                    )
                ]
            )
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ):
        output_states = ()
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        lora_scale = cross_attention_kwargs.get("scale", 1.0)

        if attention_mask is None:
            # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask.
            mask = None if encoder_hidden_states is None else encoder_attention_mask
        else:
            # when attention_mask is defined: we don't even check for encoder_attention_mask.
            # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks.
            # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask.
            #       then we can simplify this whole if/else block to:
            #         mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask
            mask = attention_mask

        for resnet, attn in zip(self.resnets, self.attentions):
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=mask,
                    **cross_attention_kwargs,
                )
            else:
                hidden_states = resnet(hidden_states, temb, scale=lora_scale)

                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=mask,
                    **cross_attention_kwargs,
                )

            output_states = output_states + (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states, temb, scale=lora_scale)

            output_states = output_states + (hidden_states,)

        return hidden_states, output_states

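# Illustrative usage sketch (hypothetical sizes): cross-attention here uses the added-KV
# attention processors, so the text context enters through `added_kv_proj_dim`.
#
#     block = SimpleCrossAttnDownBlock2D(
#         in_channels=64, out_channels=128, temb_channels=512,
#         attention_head_dim=32, cross_attention_dim=768,
#     )
#     out, skips = block(
#         torch.randn(1, 64, 32, 32),
#         temb=torch.randn(1, 512),
#         encoder_hidden_states=torch.randn(1, 77, 768),
#     )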

class KDownBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        dropout: float = 0.0,
        num_layers: int = 4,
        resnet_eps: float = 1e-5,
        resnet_act_fn: str = "gelu",
        resnet_group_size: int = 32,
        add_downsample=False,
    ):
        super().__init__()
        resnets = []

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            groups = in_channels // resnet_group_size
            groups_out = out_channels // resnet_group_size

            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    dropout=dropout,
                    temb_channels=temb_channels,
                    groups=groups,
                    groups_out=groups_out,
                    eps=resnet_eps,
                    non_linearity=resnet_act_fn,
                    time_embedding_norm="ada_group",
                    conv_shortcut_bias=False,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_downsample:
            # YiYi's comments- might be able to use FirDownsample2D, look into details later
            self.downsamplers = nn.ModuleList([KDownsample2D()])
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(self, hidden_states, temb=None, scale: float = 1.0):
        output_states = ()

        for resnet in self.resnets:
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                if is_torch_version(">=", "1.11.0"):
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
                    )
                else:
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb
                    )
            else:
                hidden_states = resnet(hidden_states, temb, scale)

            output_states += (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)

        return hidden_states, output_states

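# Illustrative usage sketch (hypothetical sizes): the "K" blocks use `ada_group`
# time-embedding norms and group counts derived from `channels // resnet_group_size`.
#
#     block = KDownBlock2D(in_channels=64, out_channels=128, temb_channels=512, add_downsample=True)
#     out, skips = block(torch.randn(1, 64, 32, 32), temb=torch.randn(1, 512))  # out: (1, 128, 16, 16)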

class KCrossAttnDownBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        cross_attention_dim: int,
        dropout: float = 0.0,
        num_layers: int = 4,
        resnet_group_size: int = 32,
        add_downsample=True,
        attention_head_dim: int = 64,
        add_self_attention: bool = False,
        resnet_eps: float = 1e-5,
        resnet_act_fn: str = "gelu",
    ):
        super().__init__()
        resnets = []
        attentions = []

        self.has_cross_attention = True

        for i in range(num_layers):
            in_channels = in_channels if i == 0 else out_channels
            groups = in_channels // resnet_group_size
            groups_out = out_channels // resnet_group_size

            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    dropout=dropout,
                    temb_channels=temb_channels,
                    groups=groups,
                    groups_out=groups_out,
                    eps=resnet_eps,
                    non_linearity=resnet_act_fn,
                    time_embedding_norm="ada_group",
                    conv_shortcut_bias=False,
                )
            )
            attentions.append(
                KAttentionBlock(
                    out_channels,
                    out_channels // attention_head_dim,
                    attention_head_dim,
                    cross_attention_dim=cross_attention_dim,
                    temb_channels=temb_channels,
                    attention_bias=True,
                    add_self_attention=add_self_attention,
                    cross_attention_norm="layer_norm",
                    group_size=resnet_group_size,
                )
            )

        self.resnets = nn.ModuleList(resnets)
        self.attentions = nn.ModuleList(attentions)

        if add_downsample:
            self.downsamplers = nn.ModuleList([KDownsample2D()])
        else:
            self.downsamplers = None

        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ):
        output_states = ()
        lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0

        for resnet, attn in zip(self.resnets, self.attentions):
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    emb=temb,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                    encoder_attention_mask=encoder_attention_mask,
                )
            else:
                hidden_states = resnet(hidden_states, temb, scale=lora_scale)
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    emb=temb,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                    encoder_attention_mask=encoder_attention_mask,
                )

            if self.downsamplers is None:
                output_states += (None,)
            else:
                output_states += (hidden_states,)

        if self.downsamplers is not None:
            for downsampler in self.downsamplers:
                hidden_states = downsampler(hidden_states)

        return hidden_states, output_states

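# Illustrative usage sketch (hypothetical sizes): note that when `add_downsample=False`
# this block records `None` placeholders in `output_states` instead of skip tensors.
#
#     block = KCrossAttnDownBlock2D(
#         in_channels=64, out_channels=128, temb_channels=512, cross_attention_dim=768
#     )
#     out, skips = block(
#         torch.randn(1, 64, 32, 32),
#         temb=torch.randn(1, 512),
#         encoder_hidden_states=torch.randn(1, 77, 768),
#     )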

class AttnUpBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        prev_output_channel: int,
        out_channels: int,
        temb_channels: int,
        resolution_idx: int = None,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attention_head_dim=1,
        output_scale_factor=1.0,
        upsample_type="conv",
    ):
        super().__init__()
        resnets = []
        attentions = []

        self.upsample_type = upsample_type

        if attention_head_dim is None:
            logger.warn(
                f"It is not recommended to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `in_channels`: {out_channels}."
            )
            attention_head_dim = out_channels

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            attentions.append(
                Attention(
                    out_channels,
                    heads=out_channels // attention_head_dim,
                    dim_head=attention_head_dim,
                    rescale_output_factor=output_scale_factor,
                    eps=resnet_eps,
                    norm_num_groups=resnet_groups,
                    residual_connection=True,
                    bias=True,
                    upcast_softmax=True,
                    _from_deprecated_attn_block=True,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if upsample_type == "conv":
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        elif upsample_type == "resnet":
            self.upsamplers = nn.ModuleList(
                [
                    ResnetBlock2D(
                        in_channels=out_channels,
                        out_channels=out_channels,
                        temb_channels=temb_channels,
                        eps=resnet_eps,
                        groups=resnet_groups,
                        dropout=dropout,
                        time_embedding_norm=resnet_time_scale_shift,
                        non_linearity=resnet_act_fn,
                        output_scale_factor=output_scale_factor,
                        pre_norm=resnet_pre_norm,
                        up=True,
                    )
                ]
            )
        else:
            self.upsamplers = None

        self.resolution_idx = resolution_idx

    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, scale: float = 1.0):
        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            hidden_states = resnet(hidden_states, temb, scale=scale)
            cross_attention_kwargs = {"scale": scale}
            hidden_states = attn(hidden_states, **cross_attention_kwargs)

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                if self.upsample_type == "resnet":
                    hidden_states = upsampler(hidden_states, temb=temb, scale=scale)
                else:
                    hidden_states = upsampler(hidden_states, scale=scale)

        return hidden_states

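# Illustrative usage sketch (hypothetical sizes): up blocks consume one skip tensor per
# resnet, popped from the end of `res_hidden_states_tuple`, so with the default
# `num_layers=1` a single skip with `in_channels` channels is expected.
#
#     block = AttnUpBlock2D(
#         in_channels=64, prev_output_channel=128, out_channels=64,
#         temb_channels=512, attention_head_dim=32,
#     )
#     x = torch.randn(1, 128, 16, 16)
#     skip = (torch.randn(1, 64, 16, 16),)
#     out = block(x, skip, temb=torch.randn(1, 512))  # -> (1, 64, 32, 32) with the default conv upsampler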

class CrossAttnUpBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        prev_output_channel: int,
        temb_channels: int,
        resolution_idx: int = None,
        dropout: float = 0.0,
        num_layers: int = 1,
        transformer_layers_per_block: Union[int, Tuple[int]] = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        num_attention_heads=1,
        cross_attention_dim=1280,
        output_scale_factor=1.0,
        add_upsample=True,
        dual_cross_attention=False,
        use_linear_projection=False,
        only_cross_attention=False,
        upcast_attention=False,
        attention_type="default",
    ):
        super().__init__()
        resnets = []
        attentions = []

        self.has_cross_attention = True
        self.num_attention_heads = num_attention_heads

        if isinstance(transformer_layers_per_block, int):
            transformer_layers_per_block = [transformer_layers_per_block] * num_layers

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            if not dual_cross_attention:
                attentions.append(
                    Transformer2DModel(
                        num_attention_heads,
                        out_channels // num_attention_heads,
                        in_channels=out_channels,
                        num_layers=transformer_layers_per_block[i],
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                        use_linear_projection=use_linear_projection,
                        only_cross_attention=only_cross_attention,
                        upcast_attention=upcast_attention,
                        attention_type=attention_type,
                    )
                )
            else:
                attentions.append(
                    DualTransformer2DModel(
                        num_attention_heads,
                        out_channels // num_attention_heads,
                        in_channels=out_channels,
                        num_layers=1,
                        cross_attention_dim=cross_attention_dim,
                        norm_num_groups=resnet_groups,
                    )
                )
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False
        self.resolution_idx = resolution_idx

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        upsample_size: Optional[int] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ):
        lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
        is_freeu_enabled = (
            getattr(self, "s1", None)
            and getattr(self, "s2", None)
            and getattr(self, "b1", None)
            and getattr(self, "b2", None)
        )

        for resnet, attn in zip(self.resnets, self.attentions):
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]

            # FreeU: Only operate on the first two stages
            if is_freeu_enabled:
                hidden_states, res_hidden_states = apply_freeu(
                    self.resolution_idx,
                    hidden_states,
                    res_hidden_states,
                    s1=self.s1,
                    s2=self.s2,
                    b1=self.b1,
                    b2=self.b2,
                )

            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )[0]
            else:
                hidden_states = resnet(hidden_states, temb, scale=lora_scale)
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    cross_attention_kwargs=cross_attention_kwargs,
                    attention_mask=attention_mask,
                    encoder_attention_mask=encoder_attention_mask,
                    return_dict=False,
                )[0]

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size, scale=lora_scale)

        return hidden_states

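# Illustrative usage sketch (hypothetical sizes): mirrors `CrossAttnDownBlock2D` on the
# decoder side; note the argument order here is (in_channels, out_channels, prev_output_channel).
#
#     block = CrossAttnUpBlock2D(
#         in_channels=64, out_channels=64, prev_output_channel=128, temb_channels=512,
#         num_attention_heads=8, cross_attention_dim=768,
#     )
#     out = block(
#         torch.randn(1, 128, 16, 16),
#         res_hidden_states_tuple=(torch.randn(1, 64, 16, 16),),
#         temb=torch.randn(1, 512),
#         encoder_hidden_states=torch.randn(1, 77, 768),
#     )  # -> (1, 64, 32, 32) with the default upsampler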

Patrick von Platen's avatar
Patrick von Platen committed
2339
class UpBlock2D(nn.Module):
2340
2341
2342
    def __init__(
        self,
        in_channels: int,
Patrick von Platen's avatar
Patrick von Platen committed
2343
2344
        prev_output_channel: int,
        out_channels: int,
2345
        temb_channels: int,
2346
        resolution_idx: int = None,
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
2357
2358
2359
2360
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor=1.0,
        add_upsample=True,
    ):
        super().__init__()
        resnets = []

        for i in range(num_layers):
Patrick von Platen's avatar
Patrick von Platen committed
2361
2362
2363
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

2364
            resnets.append(
2365
                ResnetBlock2D(
Patrick von Platen's avatar
Patrick von Platen committed
2366
2367
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
2368
2369
2370
2371
2372
2373
2374
2375
2376
2377
2378
2379
2380
2381
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
Patrick von Platen's avatar
Patrick von Platen committed
2382
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
2383
2384
2385
        else:
            self.upsamplers = None

2386
        self.gradient_checkpointing = False
2387
        self.resolution_idx = resolution_idx
2388

2389
    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, scale: float = 1.0):
        is_freeu_enabled = (
            getattr(self, "s1", None)
            and getattr(self, "s2", None)
            and getattr(self, "b1", None)
            and getattr(self, "b2", None)
        )

        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]

            # FreeU: Only operate on the first two stages
            if is_freeu_enabled:
                hidden_states, res_hidden_states = apply_freeu(
                    self.resolution_idx,
                    hidden_states,
                    res_hidden_states,
                    s1=self.s1,
                    s2=self.s2,
                    b1=self.b1,
                    b2=self.b2,
                )

            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                if is_torch_version(">=", "1.11.0"):
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
                    )
                else:
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb
                    )
            else:
                hidden_states = resnet(hidden_states, temb, scale=scale)

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, upsample_size, scale=scale)

        return hidden_states
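
# A minimal usage sketch for the `UpBlock2D` defined above, kept in comments so importing this
# module stays side-effect free. The channel counts and spatial sizes below are illustrative
# assumptions, not values required by any particular pipeline:
#
#     block = UpBlock2D(
#         in_channels=320, prev_output_channel=640, out_channels=640, temb_channels=1280, num_layers=2
#     )
#     sample = torch.randn(1, 640, 32, 32)  # output of the previous up block
#     temb = torch.randn(1, 1280)  # time embedding
#     # `res_hidden_states_tuple` is consumed from the end: its last entry feeds the first resnet
#     # (here `out_channels` wide) and its first entry feeds the last resnet (`in_channels` wide).
#     skips = (torch.randn(1, 320, 32, 32), torch.randn(1, 640, 32, 32))
#     out = block(sample, skips, temb)  # -> (1, 640, 64, 64) after the final upsampler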


class UpDecoderBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        resolution_idx: int = None,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",  # default, spatial
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor=1.0,
        add_upsample=True,
        temb_channels=None,
    ):
        super().__init__()
        resnets = []

        for i in range(num_layers):
            input_channels = in_channels if i == 0 else out_channels

            resnets.append(
                ResnetBlock2D(
                    in_channels=input_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

        self.resolution_idx = resolution_idx

    def forward(self, hidden_states, temb=None, scale: float = 1.0):
        for resnet in self.resnets:
            hidden_states = resnet(hidden_states, temb=temb, scale=scale)

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states)

        return hidden_states
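
# An illustrative sketch of `UpDecoderBlock2D` in a VAE-style decoder (comments only; the shapes
# are arbitrary assumptions). With the default `resnet_time_scale_shift="default"` and
# `temb_channels=None`, no time embedding is needed:
#
#     block = UpDecoderBlock2D(in_channels=512, out_channels=256, num_layers=3)
#     latents = torch.randn(1, 512, 32, 32)
#     decoded = block(latents)  # -> (1, 256, 64, 64) after the upsampler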


class AttnUpDecoderBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        resolution_idx: int = None,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attention_head_dim=1,
        output_scale_factor=1.0,
        add_upsample=True,
        temb_channels=None,
    ):
        super().__init__()
        resnets = []
        attentions = []

        if attention_head_dim is None:
            logger.warn(
                f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `out_channels`: {out_channels}."
            )
            attention_head_dim = out_channels

        for i in range(num_layers):
            input_channels = in_channels if i == 0 else out_channels

            resnets.append(
                ResnetBlock2D(
                    in_channels=input_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )
            attentions.append(
                Attention(
                    out_channels,
                    heads=out_channels // attention_head_dim,
                    dim_head=attention_head_dim,
                    rescale_output_factor=output_scale_factor,
                    eps=resnet_eps,
                    norm_num_groups=resnet_groups if resnet_time_scale_shift != "spatial" else None,
                    spatial_norm_dim=temb_channels if resnet_time_scale_shift == "spatial" else None,
                    residual_connection=True,
                    bias=True,
                    upcast_softmax=True,
                    _from_deprecated_attn_block=True,
                )
            )

        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
        else:
            self.upsamplers = None

        self.resolution_idx = resolution_idx

    def forward(self, hidden_states, temb=None, scale: float = 1.0):
        for resnet, attn in zip(self.resnets, self.attentions):
            hidden_states = resnet(hidden_states, temb=temb, scale=scale)
            cross_attention_kwargs = {"scale": scale}
            hidden_states = attn(hidden_states, temb=temb, **cross_attention_kwargs)

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, scale=scale)

        return hidden_states


class AttnSkipUpBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        prev_output_channel: int,
        out_channels: int,
        temb_channels: int,
        resolution_idx: int = None,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_pre_norm: bool = True,
        attention_head_dim=1,
        output_scale_factor=np.sqrt(2.0),
        add_upsample=True,
    ):
        super().__init__()
        self.attentions = nn.ModuleList([])
        self.resnets = nn.ModuleList([])

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            self.resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=min((resnet_in_channels + res_skip_channels) // 4, 32),
                    groups_out=min(out_channels // 4, 32),
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        if attention_head_dim is None:
            logger.warn(
                f"It is not recommend to pass `attention_head_dim=None`. Defaulting `attention_head_dim` to `out_channels`: {out_channels}."
            )
            attention_head_dim = out_channels

        self.attentions.append(
            Attention(
                out_channels,
                heads=out_channels // attention_head_dim,
                dim_head=attention_head_dim,
                rescale_output_factor=output_scale_factor,
                eps=resnet_eps,
                norm_num_groups=32,
                residual_connection=True,
                bias=True,
                upcast_softmax=True,
                _from_deprecated_attn_block=True,
            )
        )

        self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels)
        if add_upsample:
            self.resnet_up = ResnetBlock2D(
                in_channels=out_channels,
                out_channels=out_channels,
                temb_channels=temb_channels,
                eps=resnet_eps,
                groups=min(out_channels // 4, 32),
                groups_out=min(out_channels // 4, 32),
                dropout=dropout,
                time_embedding_norm=resnet_time_scale_shift,
                non_linearity=resnet_act_fn,
                output_scale_factor=output_scale_factor,
                pre_norm=resnet_pre_norm,
2661
                use_in_shortcut=True,
2662
2663
2664
2665
2666
2667
2668
2669
2670
2671
2672
2673
2674
2675
                up=True,
                kernel="fir",
            )
            self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            self.skip_norm = torch.nn.GroupNorm(
                num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True
            )
            self.act = nn.SiLU()
        else:
            self.resnet_up = None
            self.skip_conv = None
            self.skip_norm = None
            self.act = None

        self.resolution_idx = resolution_idx

    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, skip_sample=None, scale: float = 1.0):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            hidden_states = resnet(hidden_states, temb, scale=scale)

        cross_attention_kwargs = {"scale": scale}
        hidden_states = self.attentions[0](hidden_states, **cross_attention_kwargs)

        if skip_sample is not None:
            skip_sample = self.upsampler(skip_sample)
        else:
            skip_sample = 0

        if self.resnet_up is not None:
            skip_sample_states = self.skip_norm(hidden_states)
            skip_sample_states = self.act(skip_sample_states)
            skip_sample_states = self.skip_conv(skip_sample_states)

            skip_sample = skip_sample + skip_sample_states

            hidden_states = self.resnet_up(hidden_states, temb, scale=scale)

        return hidden_states, skip_sample


class SkipUpBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        prev_output_channel: int,
        out_channels: int,
        temb_channels: int,
        resolution_idx: int = None,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_pre_norm: bool = True,
        output_scale_factor=np.sqrt(2.0),
        add_upsample=True,
        upsample_padding=1,
    ):
        super().__init__()
        self.resnets = nn.ModuleList([])

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            self.resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=min((resnet_in_channels + res_skip_channels) // 4, 32),
                    groups_out=min(out_channels // 4, 32),
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                )
            )

        self.upsampler = FirUpsample2D(in_channels, out_channels=out_channels)
        if add_upsample:
            self.resnet_up = ResnetBlock2D(
                in_channels=out_channels,
                out_channels=out_channels,
                temb_channels=temb_channels,
                eps=resnet_eps,
                groups=min(out_channels // 4, 32),
                groups_out=min(out_channels // 4, 32),
                dropout=dropout,
                time_embedding_norm=resnet_time_scale_shift,
                non_linearity=resnet_act_fn,
                output_scale_factor=output_scale_factor,
                pre_norm=resnet_pre_norm,
                use_in_shortcut=True,
                up=True,
                kernel="fir",
            )
            self.skip_conv = nn.Conv2d(out_channels, 3, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
            self.skip_norm = torch.nn.GroupNorm(
                num_groups=min(out_channels // 4, 32), num_channels=out_channels, eps=resnet_eps, affine=True
            )
            self.act = nn.SiLU()
        else:
            self.resnet_up = None
            self.skip_conv = None
            self.skip_norm = None
            self.act = None

        self.resolution_idx = resolution_idx

    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, skip_sample=None, scale: float = 1.0):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            hidden_states = resnet(hidden_states, temb, scale=scale)

        if skip_sample is not None:
            skip_sample = self.upsampler(skip_sample)
        else:
            skip_sample = 0

        if self.resnet_up is not None:
            skip_sample_states = self.skip_norm(hidden_states)
            skip_sample_states = self.act(skip_sample_states)
            skip_sample_states = self.skip_conv(skip_sample_states)

            skip_sample = skip_sample + skip_sample_states

            hidden_states = self.resnet_up(hidden_states, temb, scale=scale)

        return hidden_states, skip_sample


class ResnetUpsampleBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        prev_output_channel: int,
        out_channels: int,
        temb_channels: int,
        resolution_idx: int = None,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        output_scale_factor=1.0,
        add_upsample=True,
        skip_time_act=False,
    ):
        super().__init__()
        resnets = []

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                    skip_time_act=skip_time_act,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList(
                [
                    ResnetBlock2D(
                        in_channels=out_channels,
                        out_channels=out_channels,
                        temb_channels=temb_channels,
                        eps=resnet_eps,
                        groups=resnet_groups,
                        dropout=dropout,
                        time_embedding_norm=resnet_time_scale_shift,
                        non_linearity=resnet_act_fn,
                        output_scale_factor=output_scale_factor,
                        pre_norm=resnet_pre_norm,
                        skip_time_act=skip_time_act,
                        up=True,
                    )
                ]
            )
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False
        self.resolution_idx = resolution_idx

    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, scale: float = 1.0):
        for resnet in self.resnets:
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                if is_torch_version(">=", "1.11.0"):
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
                    )
                else:
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb
                    )
            else:
                hidden_states = resnet(hidden_states, temb, scale=scale)

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, temb, scale=scale)

        return hidden_states


class SimpleCrossAttnUpBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        prev_output_channel: int,
        temb_channels: int,
        resolution_idx: int = None,
        dropout: float = 0.0,
        num_layers: int = 1,
        resnet_eps: float = 1e-6,
        resnet_time_scale_shift: str = "default",
        resnet_act_fn: str = "swish",
        resnet_groups: int = 32,
        resnet_pre_norm: bool = True,
        attention_head_dim=1,
        cross_attention_dim=1280,
        output_scale_factor=1.0,
        add_upsample=True,
        skip_time_act=False,
        only_cross_attention=False,
        cross_attention_norm=None,
    ):
        super().__init__()
        resnets = []
        attentions = []

        self.has_cross_attention = True
        self.attention_head_dim = attention_head_dim

        self.num_heads = out_channels // self.attention_head_dim

        for i in range(num_layers):
            res_skip_channels = in_channels if (i == num_layers - 1) else out_channels
            resnet_in_channels = prev_output_channel if i == 0 else out_channels

            resnets.append(
                ResnetBlock2D(
                    in_channels=resnet_in_channels + res_skip_channels,
                    out_channels=out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=resnet_groups,
                    dropout=dropout,
                    time_embedding_norm=resnet_time_scale_shift,
                    non_linearity=resnet_act_fn,
                    output_scale_factor=output_scale_factor,
                    pre_norm=resnet_pre_norm,
                    skip_time_act=skip_time_act,
                )
            )

            processor = (
                AttnAddedKVProcessor2_0() if hasattr(F, "scaled_dot_product_attention") else AttnAddedKVProcessor()
            )

            attentions.append(
                Attention(
                    query_dim=out_channels,
                    cross_attention_dim=out_channels,
                    heads=self.num_heads,
                    dim_head=self.attention_head_dim,
                    added_kv_proj_dim=cross_attention_dim,
                    norm_num_groups=resnet_groups,
                    bias=True,
                    upcast_softmax=True,
                    only_cross_attention=only_cross_attention,
                    cross_attention_norm=cross_attention_norm,
                    processor=processor,
                )
            )
        self.attentions = nn.ModuleList(attentions)
        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList(
                [
                    ResnetBlock2D(
                        in_channels=out_channels,
                        out_channels=out_channels,
                        temb_channels=temb_channels,
                        eps=resnet_eps,
                        groups=resnet_groups,
                        dropout=dropout,
                        time_embedding_norm=resnet_time_scale_shift,
                        non_linearity=resnet_act_fn,
                        output_scale_factor=output_scale_factor,
                        pre_norm=resnet_pre_norm,
                        skip_time_act=skip_time_act,
                        up=True,
                    )
                ]
            )
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False
        self.resolution_idx = resolution_idx

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        upsample_size: Optional[int] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ):
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        lora_scale = cross_attention_kwargs.get("scale", 1.0)
        if attention_mask is None:
            # if encoder_hidden_states is defined: we are doing cross-attn, so we should use cross-attn mask.
            mask = None if encoder_hidden_states is None else encoder_attention_mask
        else:
            # when attention_mask is defined: we don't even check for encoder_attention_mask.
            # this is to maintain compatibility with UnCLIP, which uses 'attention_mask' param for cross-attn masks.
            # TODO: UnCLIP should express cross-attn mask via encoder_attention_mask param instead of via attention_mask.
            #       then we can simplify this whole if/else block to:
            #         mask = attention_mask if encoder_hidden_states is None else encoder_attention_mask
            mask = attention_mask

        for resnet, attn in zip(self.resnets, self.attentions):
            # resnet
            # pop res hidden states
            res_hidden_states = res_hidden_states_tuple[-1]
            res_hidden_states_tuple = res_hidden_states_tuple[:-1]
            hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=mask,
                    **cross_attention_kwargs,
                )
            else:
                hidden_states = resnet(hidden_states, temb, scale=lora_scale)

                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=mask,
                    **cross_attention_kwargs,
                )

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states, temb, scale=lora_scale)

        return hidden_states


class KUpBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        resolution_idx: int,
        dropout: float = 0.0,
        num_layers: int = 5,
        resnet_eps: float = 1e-5,
        resnet_act_fn: str = "gelu",
        resnet_group_size: Optional[int] = 32,
        add_upsample=True,
    ):
        super().__init__()
        resnets = []
        k_in_channels = 2 * out_channels
        k_out_channels = in_channels
        num_layers = num_layers - 1

        for i in range(num_layers):
            in_channels = k_in_channels if i == 0 else out_channels
            groups = in_channels // resnet_group_size
            groups_out = out_channels // resnet_group_size

            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=k_out_channels if (i == num_layers - 1) else out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=groups,
                    groups_out=groups_out,
                    dropout=dropout,
                    non_linearity=resnet_act_fn,
                    time_embedding_norm="ada_group",
                    conv_shortcut_bias=False,
                )
            )

        self.resnets = nn.ModuleList(resnets)

        if add_upsample:
            self.upsamplers = nn.ModuleList([KUpsample2D()])
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False
        self.resolution_idx = resolution_idx

    def forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None, scale: float = 1.0):
        res_hidden_states_tuple = res_hidden_states_tuple[-1]
        if res_hidden_states_tuple is not None:
            hidden_states = torch.cat([hidden_states, res_hidden_states_tuple], dim=1)

        for resnet in self.resnets:
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module):
                    def custom_forward(*inputs):
                        return module(*inputs)

                    return custom_forward

                if is_torch_version(">=", "1.11.0"):
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
                    )
                else:
                    hidden_states = torch.utils.checkpoint.checkpoint(
                        create_custom_forward(resnet), hidden_states, temb
                    )
            else:
                hidden_states = resnet(hidden_states, temb, scale=scale)

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states)

        return hidden_states
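
# A brief, illustrative sketch of `KUpBlock2D` (comments only; channel counts and shapes are
# assumptions chosen so the group sizes divide evenly, not values mandated by the block). Only
# the last entry of `res_hidden_states_tuple` is used, and it is concatenated channel-wise with
# the incoming sample, so both must be `out_channels` wide:
#
#     block = KUpBlock2D(in_channels=256, out_channels=512, temb_channels=512, resolution_idx=0)
#     sample = torch.randn(1, 512, 16, 16)
#     skip = (torch.randn(1, 512, 16, 16),)  # only skip[-1] is consumed
#     temb = torch.randn(1, 512)  # required by the "ada_group" time-embedding norm
#     out = block(sample, skip, temb)  # -> (1, 256, 32, 32) after KUpsample2D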


class KCrossAttnUpBlock2D(nn.Module):
    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        temb_channels: int,
        resolution_idx: int,
        dropout: float = 0.0,
        num_layers: int = 4,
        resnet_eps: float = 1e-5,
        resnet_act_fn: str = "gelu",
        resnet_group_size: int = 32,
        attention_head_dim=1,  # attention dim_head
        cross_attention_dim: int = 768,
        add_upsample: bool = True,
        upcast_attention: bool = False,
    ):
        super().__init__()
        resnets = []
        attentions = []

        is_first_block = in_channels == out_channels == temb_channels
        is_middle_block = in_channels != out_channels
        add_self_attention = True if is_first_block else False

        self.has_cross_attention = True
        self.attention_head_dim = attention_head_dim

        # in_channels and out_channels for the block (k-unet)
        k_in_channels = out_channels if is_first_block else 2 * out_channels
        k_out_channels = in_channels

        num_layers = num_layers - 1

        for i in range(num_layers):
            in_channels = k_in_channels if i == 0 else out_channels
            groups = in_channels // resnet_group_size
            groups_out = out_channels // resnet_group_size

            if is_middle_block and (i == num_layers - 1):
                conv_2d_out_channels = k_out_channels
            else:
                conv_2d_out_channels = None

            resnets.append(
                ResnetBlock2D(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    conv_2d_out_channels=conv_2d_out_channels,
                    temb_channels=temb_channels,
                    eps=resnet_eps,
                    groups=groups,
                    groups_out=groups_out,
                    dropout=dropout,
                    non_linearity=resnet_act_fn,
                    time_embedding_norm="ada_group",
                    conv_shortcut_bias=False,
                )
            )
            attentions.append(
                KAttentionBlock(
                    k_out_channels if (i == num_layers - 1) else out_channels,
                    k_out_channels // attention_head_dim
                    if (i == num_layers - 1)
                    else out_channels // attention_head_dim,
                    attention_head_dim,
                    cross_attention_dim=cross_attention_dim,
                    temb_channels=temb_channels,
                    attention_bias=True,
                    add_self_attention=add_self_attention,
                    cross_attention_norm="layer_norm",
                    upcast_attention=upcast_attention,
                )
            )

        self.resnets = nn.ModuleList(resnets)
        self.attentions = nn.ModuleList(attentions)

        if add_upsample:
            self.upsamplers = nn.ModuleList([KUpsample2D()])
        else:
            self.upsamplers = None

        self.gradient_checkpointing = False
        self.resolution_idx = resolution_idx

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],
        temb: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        upsample_size: Optional[int] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ):
        res_hidden_states_tuple = res_hidden_states_tuple[-1]
        if res_hidden_states_tuple is not None:
            hidden_states = torch.cat([hidden_states, res_hidden_states_tuple], dim=1)

        lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
        for resnet, attn in zip(self.resnets, self.attentions):
            if self.training and self.gradient_checkpointing:

                def create_custom_forward(module, return_dict=None):
                    def custom_forward(*inputs):
                        if return_dict is not None:
                            return module(*inputs, return_dict=return_dict)
                        else:
                            return module(*inputs)

                    return custom_forward

                ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
                hidden_states = torch.utils.checkpoint.checkpoint(
                    create_custom_forward(resnet),
                    hidden_states,
                    temb,
                    **ckpt_kwargs,
                )
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    emb=temb,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                    encoder_attention_mask=encoder_attention_mask,
                )
            else:
                hidden_states = resnet(hidden_states, temb, scale=lora_scale)
                hidden_states = attn(
                    hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    emb=temb,
                    attention_mask=attention_mask,
                    cross_attention_kwargs=cross_attention_kwargs,
                    encoder_attention_mask=encoder_attention_mask,
                )

        if self.upsamplers is not None:
            for upsampler in self.upsamplers:
                hidden_states = upsampler(hidden_states)

        return hidden_states


# can potentially later be renamed to `No-feed-forward` attention
class KAttentionBlock(nn.Module):
    r"""
    A basic Transformer block.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        attention_bias (`bool`, *optional*, defaults to `False`): Configure if the attentions should contain a bias parameter.
        upcast_attention (`bool`, *optional*, defaults to `False`): Whether to upcast the attention computation to float32.
        temb_channels (`int`, *optional*, defaults to 768): The number of channels in the time embedding consumed by the `AdaGroupNorm` layers.
        add_self_attention (`bool`, *optional*, defaults to `False`): Whether to add a self-attention block before the cross-attention block.
        cross_attention_norm (`str`, *optional*, defaults to `None`): The normalization applied to the `encoder_hidden_states` in cross attention.
        group_size (`int`, *optional*, defaults to 32): The group size used to derive the number of groups for the `AdaGroupNorm` layers.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout: float = 0.0,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        upcast_attention: bool = False,
        temb_channels: int = 768,  # for ada_group_norm
        add_self_attention: bool = False,
        cross_attention_norm: Optional[str] = None,
        group_size: int = 32,
    ):
        super().__init__()
        self.add_self_attention = add_self_attention

        # 1. Self-Attn
        if add_self_attention:
            self.norm1 = AdaGroupNorm(temb_channels, dim, max(1, dim // group_size))
            self.attn1 = Attention(
                query_dim=dim,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                cross_attention_dim=None,
                cross_attention_norm=None,
            )

        # 2. Cross-Attn
        self.norm2 = AdaGroupNorm(temb_channels, dim, max(1, dim // group_size))
        self.attn2 = Attention(
            query_dim=dim,
            cross_attention_dim=cross_attention_dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            upcast_attention=upcast_attention,
            cross_attention_norm=cross_attention_norm,
        )

    def _to_3d(self, hidden_states, height, weight):
        return hidden_states.permute(0, 2, 3, 1).reshape(hidden_states.shape[0], height * weight, -1)

    def _to_4d(self, hidden_states, height, weight):
        return hidden_states.permute(0, 2, 1).reshape(hidden_states.shape[0], -1, height, weight)

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        # TODO: mark emb as non-optional (self.norm2 requires it).
        #       requires assessing impact of change to positional param interface.
        emb: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
    ):
        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        # 1. Self-Attention
        if self.add_self_attention:
            norm_hidden_states = self.norm1(hidden_states, emb)

            height, weight = norm_hidden_states.shape[2:]
            norm_hidden_states = self._to_3d(norm_hidden_states, height, weight)

            attn_output = self.attn1(
                norm_hidden_states,
                encoder_hidden_states=None,
                attention_mask=attention_mask,
                **cross_attention_kwargs,
            )
            attn_output = self._to_4d(attn_output, height, weight)

            hidden_states = attn_output + hidden_states

        # 2. Cross-Attention/None
        norm_hidden_states = self.norm2(hidden_states, emb)

        height, weight = norm_hidden_states.shape[2:]
        norm_hidden_states = self._to_3d(norm_hidden_states, height, weight)
        attn_output = self.attn2(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            attention_mask=attention_mask if encoder_hidden_states is None else encoder_attention_mask,
            **cross_attention_kwargs,
        )
        attn_output = self._to_4d(attn_output, height, weight)

        hidden_states = attn_output + hidden_states

        return hidden_states
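

# A minimal, illustrative sketch of how `KAttentionBlock` is typically driven (e.g. from
# `KCrossAttnUpBlock2D`). The tensor shapes and the 77-token context length below are
# assumptions for the example, not requirements of the block:
#
#     block = KAttentionBlock(dim=64, num_attention_heads=8, attention_head_dim=8, cross_attention_dim=768)
#     hidden = torch.randn(1, 64, 16, 16)  # 4D feature map (batch, channels, height, width)
#     temb = torch.randn(1, 768)  # consumed by the AdaGroupNorm layers (temb_channels=768 by default)
#     context = torch.randn(1, 77, 768)  # e.g. text-encoder hidden states
#     out = block(hidden, encoder_hidden_states=context, emb=temb)  # -> (1, 64, 16, 16)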