# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
from .lora import LoRACompatibleLinear


@maybe_allow_in_graph
class GatedSelfAttentionDense(nn.Module):
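    r"""
    A gated self-attention dense layer that fuses object (grounding) features into the visual features. It is used
    as the `fuser` module of `BasicTransformerBlock` when `attention_type` is `"gated"` or `"gated-text-image"`
    (GLIGEN-style grounded generation).

    Parameters:
        query_dim (`int`): The number of channels in the visual features.
        context_dim (`int`): The number of channels in the object features.
        n_heads (`int`): The number of heads to use for attention.
        d_head (`int`): The number of channels in each head.
    """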
    def __init__(self, query_dim, context_dim, n_heads, d_head):
        super().__init__()

        # we need a linear projection since we concatenate the visual features and the object features
        self.linear = nn.Linear(context_dim, query_dim)

        self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head)
        self.ff = FeedForward(query_dim, activation_fn="geglu")

        self.norm1 = nn.LayerNorm(query_dim)
        self.norm2 = nn.LayerNorm(query_dim)

        self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0)))
        self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0)))

        self.enabled = True

    def forward(self, x, objs):
        if not self.enabled:
            return x

        n_visual = x.shape[1]
        objs = self.linear(objs)

        x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :]
        x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x))

        return x
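
# Usage sketch (illustrative only; the sizes below are assumptions, not values used elsewhere in this file):
#
#     fuser = GatedSelfAttentionDense(query_dim=320, context_dim=768, n_heads=8, d_head=40)
#     x = torch.randn(2, 64, 320)     # visual tokens: (batch, n_visual, query_dim)
#     objs = torch.randn(2, 30, 768)  # grounding/object tokens: (batch, n_objs, context_dim)
#     x = fuser(x, objs)              # output keeps the visual token shape: (2, 64, 320)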


@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        only_cross_attention (`bool`, *optional*):
            Whether to use only cross-attention layers. In this case two cross attention layers are used.
        double_self_attention (`bool`, *optional*):
            Whether to use two self-attention layers. In this case no cross attention layers are used.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (`int`, *optional*):
            The number of diffusion steps used during training. See `Transformer2DModel`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Whether the attention layers should contain a bias parameter.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
        attention_type: str = "default",
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self-attention, where there will only be one attention block.
            # I.e. the number of modulation chunks returned by AdaLayerNormZero would not make sense if it were also
            # returned during the second (cross-)attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is None
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # 4. Fuser
        if attention_type == "gated" or attention_type == "gated-text-image":
            self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets the chunk size and dimension used for chunked feed-forward (see the feed-forward step in `forward`)
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        # 0. Prepare GLIGEN inputs
        cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
        gligen_kwargs = cross_attention_kwargs.pop("gligen", None)

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 1.5 GLIGEN Control
        if gligen_kwargs is not None:
            hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])
        # 1.5 ends

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
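
# Usage sketch (illustrative only; the sizes below are assumptions, not defaults of this module):
#
#     block = BasicTransformerBlock(
#         dim=320, num_attention_heads=8, attention_head_dim=40, cross_attention_dim=768
#     )
#     hidden_states = torch.randn(2, 4096, 320)        # (batch, sequence, dim)
#     encoder_hidden_states = torch.randn(2, 77, 768)  # e.g. text encoder hidden states
#     out = block(hidden_states, encoder_hidden_states=encoder_hidden_states)  # -> (2, 4096, 320)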


class FeedForward(nn.Module):
    r"""
    A feed-forward layer.

    Parameters:
        dim (`int`): The number of channels in the input.
        dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        final_dropout (`bool`, *optional*, defaults to `False`): Apply a final dropout.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(LoRACompatibleLinear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
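
# Usage sketch (illustrative only; the sizes below are assumptions):
#
#     ff = FeedForward(dim=320, activation_fn="geglu")
#     out = ff(torch.randn(2, 64, 320))  # default mult=4 gives an inner width of 1280; output is (2, 64, 320)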


class GELU(nn.Module):
    r"""
    GELU activation function, with optional tanh approximation via `approximate="tanh"`.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    r"""
    A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = LoRACompatibleLinear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
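
# The projection doubles the output channels; one half is the value path and the other half is the GELU-activated
# gate, so `GEGLU(dim_in, dim_out)` maps (..., dim_in) -> (..., dim_out). Illustrative sizes (assumptions):
#
#     geglu = GEGLU(dim_in=320, dim_out=1280)
#     out = geglu(torch.randn(2, 64, 320))  # -> (2, 64, 1280)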


class ApproximateGELU(nn.Module):
    """
    The approximate form of the Gaussian Error Linear Unit (GELU).

    For more details, see section 2: https://arxiv.org/abs/1606.08415
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    """
    Norm layer modified to incorporate timestep embeddings.
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
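
# Usage sketch (illustrative only; a 0-d timestep index is assumed so that the scale/shift chunking above
# broadcasts over the batch):
#
#     norm = AdaLayerNorm(embedding_dim=320, num_embeddings=1000)
#     x = torch.randn(2, 64, 320)
#     out = norm(x, timestep=torch.tensor(10))  # -> (2, 64, 320)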


class AdaLayerNormZero(nn.Module):
    """
    Norm layer implementing adaptive layer norm zero (adaLN-Zero).
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()

        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)

        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
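
# Usage sketch (illustrative only; `num_embeddings` is the number of class labels expected by
# `CombinedTimestepLabelEmbeddings`, and the sizes are assumptions):
#
#     norm = AdaLayerNormZero(embedding_dim=1152, num_embeddings=1000)
#     x = torch.randn(2, 256, 1152)
#     timesteps = torch.randint(0, 1000, (2,))
#     class_labels = torch.randint(0, 1000, (2,))
#     x, gate_msa, shift_mlp, scale_mlp, gate_mlp = norm(x, timesteps, class_labels, hidden_dtype=x.dtype)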


class AdaGroupNorm(nn.Module):
    """
    GroupNorm layer modified to incorporate timestep embeddings.
    """

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
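
# Usage sketch (illustrative only; the conditioning embedding is assumed to be (batch, embedding_dim) and the
# feature map (batch, out_dim, height, width)):
#
#     norm = AdaGroupNorm(embedding_dim=1280, out_dim=320, num_groups=32, act_fn="silu")
#     x = torch.randn(2, 320, 64, 64)
#     emb = torch.randn(2, 1280)
#     out = norm(x, emb)  # -> (2, 320, 64, 64)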