# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import deprecate, logging
from ..utils.torch_utils import maybe_allow_in_graph
from .activations import GEGLU, GELU, ApproximateGELU
from .attention_processor import Attention, JointAttnProcessor2_0
from .embeddings import SinusoidalPositionalEmbedding
from .normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, RMSNorm


logger = logging.get_logger(__name__)


def _chunked_feed_forward(ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int):
    # "feed_forward_chunk_size" can be used to save memory
    if hidden_states.shape[chunk_dim] % chunk_size != 0:
        raise ValueError(
            f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
        )

    num_chunks = hidden_states.shape[chunk_dim] // chunk_size
    ff_output = torch.cat(
        [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
        dim=chunk_dim,
    )
    return ff_output

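# A minimal usage sketch (hypothetical shapes, not part of the module's API): chunking splits
# the tokens along `chunk_dim` and applies the feed-forward to each slice, lowering peak
# memory at the cost of extra kernel launches. The chunked dimension must be divisible by
# `chunk_size`:
#
#     ff = FeedForward(dim=320)
#     hidden_states = torch.randn(2, 64, 320)
#     out = _chunked_feed_forward(ff, hidden_states, chunk_dim=1, chunk_size=32)
#     # `out` is numerically equivalent to ff(hidden_states), computed 32 tokens at a time.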

@maybe_allow_in_graph
class GatedSelfAttentionDense(nn.Module):
    r"""
    A gated self-attention dense layer that combines visual features and object features.

    Parameters:
        query_dim (`int`): The number of channels in the query.
        context_dim (`int`): The number of channels in the context.
        n_heads (`int`): The number of heads to use for attention.
        d_head (`int`): The number of channels in each head.
    """

    def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int):
        super().__init__()

        # we need a linear projection since we concatenate the visual and object features
        self.linear = nn.Linear(context_dim, query_dim)

        self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head)
        self.ff = FeedForward(query_dim, activation_fn="geglu")

        self.norm1 = nn.LayerNorm(query_dim)
        self.norm2 = nn.LayerNorm(query_dim)

        self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0)))
        self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0)))

        self.enabled = True

    def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor:
        if not self.enabled:
            return x

        n_visual = x.shape[1]
        objs = self.linear(objs)

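        # Visual tokens attend over the concatenation of visual and object tokens; only the
        # visual positions are kept, and both branches are added back through tanh gates that
        # start at zero (alpha_attn / alpha_dense).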
        x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :]
        x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x))

        return x

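# A minimal usage sketch (hypothetical shapes): the layer fuses grounding/object tokens into
# the visual token stream (as used for GLIGEN) and returns a tensor with the visual shape
# unchanged:
#
#     fuser = GatedSelfAttentionDense(query_dim=320, context_dim=768, n_heads=8, d_head=40)
#     x = torch.randn(2, 64, 320)     # visual features
#     objs = torch.randn(2, 30, 768)  # object/grounding features
#     out = fuser(x, objs)            # -> (2, 64, 320)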

@maybe_allow_in_graph
class JointTransformerBlock(nn.Module):
    r"""
    A Transformer block following the MMDiT architecture, introduced in Stable Diffusion 3.

    Reference: https://arxiv.org/abs/2403.03206

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        context_pre_only (`bool`): If `True`, the `context` (encoder) stream is only pre-processed and fed
            into the joint attention; the context output projection and feed-forward are omitted and the
            block returns `None` for the updated `encoder_hidden_states`.
    """

    def __init__(self, dim, num_attention_heads, attention_head_dim, context_pre_only=False):
        super().__init__()

        self.context_pre_only = context_pre_only
        context_norm_type = "ada_norm_continous" if context_pre_only else "ada_norm_zero"

        self.norm1 = AdaLayerNormZero(dim)

        if context_norm_type == "ada_norm_continous":
            self.norm1_context = AdaLayerNormContinuous(
                dim, dim, elementwise_affine=False, eps=1e-6, bias=True, norm_type="layer_norm"
            )
        elif context_norm_type == "ada_norm_zero":
            self.norm1_context = AdaLayerNormZero(dim)
        else:
            raise ValueError(
                f"Unknown context_norm_type: {context_norm_type}, currently only support `ada_norm_continous`, `ada_norm_zero`"
            )
        if hasattr(F, "scaled_dot_product_attention"):
            processor = JointAttnProcessor2_0()
        else:
            raise ValueError(
                "The current PyTorch version does not support the `scaled_dot_product_attention` function."
            )
        self.attn = Attention(
            query_dim=dim,
            cross_attention_dim=None,
            added_kv_proj_dim=dim,
            dim_head=attention_head_dim // num_attention_heads,
            heads=num_attention_heads,
            out_dim=attention_head_dim,
            context_pre_only=context_pre_only,
            bias=True,
            processor=processor,
        )

        self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
        self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")

        if not context_pre_only:
            self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
            self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
        else:
            self.norm2_context = None
            self.ff_context = None

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    # Copied from diffusers.models.attention.BasicTransformerBlock.set_chunk_feed_forward
    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self, hidden_states: torch.FloatTensor, encoder_hidden_states: torch.FloatTensor, temb: torch.FloatTensor
    ):
        norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)

        if self.context_pre_only:
            norm_encoder_hidden_states = self.norm1_context(encoder_hidden_states, temb)
        else:
            norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
                encoder_hidden_states, emb=temb
            )

        # Attention.
        attn_output, context_attn_output = self.attn(
            hidden_states=norm_hidden_states, encoder_hidden_states=norm_encoder_hidden_states
        )

        # Process attention outputs for the `hidden_states`.
        attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = hidden_states + attn_output

        norm_hidden_states = self.norm2(hidden_states)
        norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
        else:
            ff_output = self.ff(norm_hidden_states)
        ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = hidden_states + ff_output

        # Process attention outputs for the `encoder_hidden_states`.
        if self.context_pre_only:
            encoder_hidden_states = None
        else:
            context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output
            encoder_hidden_states = encoder_hidden_states + context_attn_output

            norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
            norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
            if self._chunk_size is not None:
                # "feed_forward_chunk_size" can be used to save memory
                context_ff_output = _chunked_feed_forward(
                    self.ff_context, norm_encoder_hidden_states, self._chunk_dim, self._chunk_size
                )
            else:
                context_ff_output = self.ff_context(norm_encoder_hidden_states)
            encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output

        return encoder_hidden_states, hidden_states

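# A minimal usage sketch (hypothetical sizes): the block jointly attends over the image and
# text token streams and returns both updated streams; with `context_pre_only=True` the
# returned `encoder_hidden_states` is `None`:
#
#     block = JointTransformerBlock(dim=1536, num_attention_heads=24, attention_head_dim=1536)
#     hidden_states = torch.randn(2, 1024, 1536)         # image tokens
#     encoder_hidden_states = torch.randn(2, 154, 1536)   # text tokens
#     temb = torch.randn(2, 1536)                          # conditioning embedding
#     encoder_hidden_states, hidden_states = block(hidden_states, encoder_hidden_states, temb)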

@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (`int`, *optional*):
            The number of diffusion steps used during training. See `Transformer2DModel`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Configure if the attentions should contain a bias parameter.
        only_cross_attention (`bool`, *optional*):
            Whether to use only cross-attention layers. In this case two cross attention layers are used.
        double_self_attention (`bool`, *optional*):
            Whether to use two self-attention layers. In this case no cross attention layers are used.
        upcast_attention (`bool`, *optional*):
            Whether to upcast the attention computation to float32. This is useful for mixed precision training.
        norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
            Whether to use learnable elementwise affine parameters for normalization.
        norm_type (`str`, *optional*, defaults to `"layer_norm"`):
            The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"`, `"ada_norm_zero"`,
            `"ada_norm_single"`, `"ada_norm_continuous"` or `"layer_norm_i2vgen"`.
        final_dropout (`bool` *optional*, defaults to False):
            Whether to apply a final dropout after the last feed-forward layer.
        attention_type (`str`, *optional*, defaults to `"default"`):
            The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`.
        positional_embeddings (`str`, *optional*, defaults to `None`):
            The type of positional embeddings to apply. Currently only `"sinusoidal"` is supported.
        num_positional_embeddings (`int`, *optional*, defaults to `None`):
            The maximum number of positional embeddings to apply.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",  # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single', 'ada_norm_continuous', 'layer_norm_i2vgen'
        norm_eps: float = 1e-5,
        final_dropout: bool = False,
        attention_type: str = "default",
        positional_embeddings: Optional[str] = None,
        num_positional_embeddings: Optional[int] = None,
        ada_norm_continous_conditioning_embedding_dim: Optional[int] = None,
        ada_norm_bias: Optional[int] = None,
        ff_inner_dim: Optional[int] = None,
        ff_bias: bool = True,
        attention_out_bias: bool = True,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        # We keep these boolean flags for backward-compatibility.
        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
        self.use_layer_norm = norm_type == "layer_norm"
        self.use_ada_layer_norm_continuous = norm_type == "ada_norm_continuous"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        self.norm_type = norm_type
        self.num_embeds_ada_norm = num_embeds_ada_norm

        if positional_embeddings and (num_positional_embeddings is None):
            raise ValueError(
                "If `positional_embedding` type is defined, `num_positition_embeddings` must also be defined."
            )

        if positional_embeddings == "sinusoidal":
            self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings)
        else:
            self.pos_embed = None

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if norm_type == "ada_norm":
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif norm_type == "ada_norm_zero":
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        elif norm_type == "ada_norm_continuous":
            self.norm1 = AdaLayerNormContinuous(
                dim,
                ada_norm_continous_conditioning_embedding_dim,
                norm_elementwise_affine,
                norm_eps,
                ada_norm_bias,
                "rms_norm",
            )
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)

        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
            out_bias=attention_out_bias,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
            # the second cross attention block.
            if norm_type == "ada_norm":
                self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm)
            elif norm_type == "ada_norm_continuous":
                self.norm2 = AdaLayerNormContinuous(
                    dim,
                    ada_norm_continous_conditioning_embedding_dim,
                    norm_elementwise_affine,
                    norm_eps,
                    ada_norm_bias,
                    "rms_norm",
                )
            else:
                self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)

            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
                out_bias=attention_out_bias,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        if norm_type == "ada_norm_continuous":
            self.norm3 = AdaLayerNormContinuous(
                dim,
                ada_norm_continous_conditioning_embedding_dim,
                norm_elementwise_affine,
                norm_eps,
                ada_norm_bias,
                "layer_norm",
            )

        elif norm_type in ["ada_norm_zero", "ada_norm", "layer_norm", "ada_norm_continuous"]:
            self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)
        elif norm_type == "layer_norm_i2vgen":
            self.norm3 = None

        self.ff = FeedForward(
            dim,
            dropout=dropout,
            activation_fn=activation_fn,
            final_dropout=final_dropout,
            inner_dim=ff_inner_dim,
            bias=ff_bias,
        )

        # 4. Fuser
        if attention_type == "gated" or attention_type == "gated-text-image":
            self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim)

        # 5. Scale-shift for PixArt-Alpha.
        if norm_type == "ada_norm_single":
            self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
        added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
    ) -> torch.Tensor:
        if cross_attention_kwargs is not None:
            if cross_attention_kwargs.get("scale", None) is not None:
                logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")

        # Notice that normalization is always applied before the real computation in the following blocks.
        # 0. Self-Attention
        batch_size = hidden_states.shape[0]

        if self.norm_type == "ada_norm":
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.norm_type == "ada_norm_zero":
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        elif self.norm_type in ["layer_norm", "layer_norm_i2vgen"]:
            norm_hidden_states = self.norm1(hidden_states)
        elif self.norm_type == "ada_norm_continuous":
            norm_hidden_states = self.norm1(hidden_states, added_cond_kwargs["pooled_text_emb"])
        elif self.norm_type == "ada_norm_single":
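            # PixArt-style AdaLN-single: offset the learned per-block table by the projected
            # timestep embedding and split it into the six modulation tensors.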
            shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
                self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)
            ).chunk(6, dim=1)
            norm_hidden_states = self.norm1(hidden_states)
            norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
            norm_hidden_states = norm_hidden_states.squeeze(1)
        else:
            raise ValueError("Incorrect norm used")

        if self.pos_embed is not None:
            norm_hidden_states = self.pos_embed(norm_hidden_states)

        # 1. Prepare GLIGEN inputs
        cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
        gligen_kwargs = cross_attention_kwargs.pop("gligen", None)

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.norm_type == "ada_norm_zero":
            attn_output = gate_msa.unsqueeze(1) * attn_output
        elif self.norm_type == "ada_norm_single":
            attn_output = gate_msa * attn_output

        hidden_states = attn_output + hidden_states
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

        # 1.2 GLIGEN Control
        if gligen_kwargs is not None:
            hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])

        # 3. Cross-Attention
        if self.attn2 is not None:
            if self.norm_type == "ada_norm":
                norm_hidden_states = self.norm2(hidden_states, timestep)
            elif self.norm_type in ["ada_norm_zero", "layer_norm", "layer_norm_i2vgen"]:
                norm_hidden_states = self.norm2(hidden_states)
            elif self.norm_type == "ada_norm_single":
                # For PixArt norm2 isn't applied here:
                # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
                norm_hidden_states = hidden_states
            elif self.norm_type == "ada_norm_continuous":
                norm_hidden_states = self.norm2(hidden_states, added_cond_kwargs["pooled_text_emb"])
            else:
                raise ValueError("Incorrect norm")

            if self.pos_embed is not None and self.norm_type != "ada_norm_single":
                norm_hidden_states = self.pos_embed(norm_hidden_states)

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 4. Feed-forward
        # i2vgen doesn't have this norm 🤷‍♂️
        if self.norm_type == "ada_norm_continuous":
            norm_hidden_states = self.norm3(hidden_states, added_cond_kwargs["pooled_text_emb"])
        elif not self.norm_type == "ada_norm_single":
            norm_hidden_states = self.norm3(hidden_states)

        if self.norm_type == "ada_norm_zero":
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self.norm_type == "ada_norm_single":
            norm_hidden_states = self.norm2(hidden_states)
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.norm_type == "ada_norm_zero":
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        elif self.norm_type == "ada_norm_single":
            ff_output = gate_mlp * ff_output

        hidden_states = ff_output + hidden_states
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

        return hidden_states

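# A minimal usage sketch (hypothetical sizes): with the default `norm_type="layer_norm"` the
# block applies pre-norm self-attention, optional cross-attention against
# `encoder_hidden_states`, and a feed-forward, each with a residual connection:
#
#     block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40, cross_attention_dim=768)
#     hidden_states = torch.randn(2, 64, 320)
#     encoder_hidden_states = torch.randn(2, 77, 768)
#     out = block(hidden_states, encoder_hidden_states=encoder_hidden_states)  # -> (2, 64, 320)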

@maybe_allow_in_graph
class TemporalBasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block for video like data.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        time_mix_inner_dim (`int`): The number of channels for temporal attention.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
    """

    def __init__(
        self,
        dim: int,
        time_mix_inner_dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        cross_attention_dim: Optional[int] = None,
    ):
        super().__init__()
        self.is_res = dim == time_mix_inner_dim

        self.norm_in = nn.LayerNorm(dim)

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        self.ff_in = FeedForward(
            dim,
            dim_out=time_mix_inner_dim,
            activation_fn="geglu",
        )

        self.norm1 = nn.LayerNorm(time_mix_inner_dim)
        self.attn1 = Attention(
            query_dim=time_mix_inner_dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            cross_attention_dim=None,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = nn.LayerNorm(time_mix_inner_dim)
            self.attn2 = Attention(
                query_dim=time_mix_inner_dim,
                cross_attention_dim=cross_attention_dim,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(time_mix_inner_dim)
        self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu")

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = None

    def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off
        self._chunk_dim = 1

    def forward(
        self,
        hidden_states: torch.Tensor,
        num_frames: int,
        encoder_hidden_states: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 0. Self-Attention
        batch_size = hidden_states.shape[0]

        batch_frames, seq_length, channels = hidden_states.shape
        batch_size = batch_frames // num_frames

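        # Fold the spatial dimension into the batch and expose the frame axis as the sequence,
        # i.e. (batch * frames, seq, channels) -> (batch * seq, frames, channels), so attention
        # mixes information across frames.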
        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels)
        hidden_states = hidden_states.permute(0, 2, 1, 3)
        hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels)

        residual = hidden_states
        hidden_states = self.norm_in(hidden_states)

        if self._chunk_size is not None:
            hidden_states = _chunked_feed_forward(self.ff_in, hidden_states, self._chunk_dim, self._chunk_size)
        else:
            hidden_states = self.ff_in(hidden_states)

        if self.is_res:
            hidden_states = hidden_states + residual

        norm_hidden_states = self.norm1(hidden_states)
        attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None)
        hidden_states = attn_output + hidden_states

        # 3. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = self.norm2(hidden_states)
            attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states)
            hidden_states = attn_output + hidden_states

        # 4. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self._chunk_size is not None:
            ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.is_res:
            hidden_states = ff_output + hidden_states
        else:
            hidden_states = ff_output

        hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels)
        hidden_states = hidden_states.permute(0, 2, 1, 3)
        hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels)

        return hidden_states

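# A minimal usage sketch (hypothetical sizes): the input is expected to be flattened over
# frames, i.e. shaped (batch_size * num_frames, seq_len, dim); the block mixes information
# along the frame axis and returns the same shape:
#
#     block = TemporalBasicTransformerBlock(dim=320, time_mix_inner_dim=320, num_attention_heads=8, attention_head_dim=40)
#     hidden_states = torch.randn(2 * 16, 64, 320)  # batch_size=2, num_frames=16
#     out = block(hidden_states, num_frames=16)     # -> (32, 64, 320)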

class SkipFFTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        kv_input_dim: int,
        kv_input_dim_proj_use_bias: bool,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        attention_out_bias: bool = True,
    ):
        super().__init__()
        if kv_input_dim != dim:
            self.kv_mapper = nn.Linear(kv_input_dim, dim, kv_input_dim_proj_use_bias)
        else:
            self.kv_mapper = None

        self.norm1 = RMSNorm(dim, 1e-06)

        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim,
            out_bias=attention_out_bias,
        )

        self.norm2 = RMSNorm(dim, 1e-06)

        self.attn2 = Attention(
            query_dim=dim,
            cross_attention_dim=cross_attention_dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            out_bias=attention_out_bias,
        )

    def forward(self, hidden_states, encoder_hidden_states, cross_attention_kwargs):
        cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}

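        # If the encoder states live in a different channel width than `dim`, project them
        # with a SiLU followed by a linear map so both attention layers can consume them.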
        if self.kv_mapper is not None:
            encoder_hidden_states = self.kv_mapper(F.silu(encoder_hidden_states))

        norm_hidden_states = self.norm1(hidden_states)

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            **cross_attention_kwargs,
        )

        hidden_states = attn_output + hidden_states

        norm_hidden_states = self.norm2(hidden_states)

        attn_output = self.attn2(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            **cross_attention_kwargs,
        )

        hidden_states = attn_output + hidden_states

        return hidden_states

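# A minimal usage sketch (hypothetical sizes): the block has no feed-forward ("skip FF"); it
# runs two attention layers over RMS-normalized states, optionally projecting the encoder
# states from `kv_input_dim` to `dim` first:
#
#     block = SkipFFTransformerBlock(
#         dim=320, num_attention_heads=8, attention_head_dim=40,
#         kv_input_dim=512, kv_input_dim_proj_use_bias=False, cross_attention_dim=None,
#     )
#     hidden_states = torch.randn(2, 64, 320)
#     encoder_hidden_states = torch.randn(2, 77, 512)
#     out = block(hidden_states, encoder_hidden_states, cross_attention_kwargs=None)  # -> (2, 64, 320)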

class FeedForward(nn.Module):
    r"""
    A feed-forward layer.

    Parameters:
        dim (`int`): The number of channels in the input.
        dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
        inner_dim=None,
        bias: bool = True,
    ):
        super().__init__()
        if inner_dim is None:
            inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim, bias=bias)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias)
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim, bias=bias)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim, bias=bias)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out, bias=bias))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states: torch.Tensor, *args, **kwargs) -> torch.Tensor:
        if len(args) > 0 or kwargs.get("scale", None) is not None:
            deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
            deprecate("scale", "1.0.0", deprecation_message)
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
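

# A minimal usage sketch (hypothetical sizes): the default configuration expands `dim` by
# `mult` through a GEGLU projection and maps back to `dim_out` (here equal to `dim`):
#
#     ff = FeedForward(dim=320, mult=4, activation_fn="geglu")
#     hidden_states = torch.randn(2, 64, 320)
#     out = ff(hidden_states)  # -> (2, 64, 320)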