# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional

import torch
import torch.nn.functional as F
from torch import nn

from ..utils import maybe_allow_in_graph
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings


@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        only_cross_attention (`bool`, *optional*):
            Whether to use only cross-attention layers. In this case two cross attention layers are used.
        double_self_attention (`bool`, *optional*):
            Whether to use two self-attention layers. In this case no cross attention layers are used.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (`int`, *optional*): The number of diffusion steps used during training. See `Transformer2DModel`.
        attention_bias (`bool`, *optional*, defaults to `False`): Configure if the attention layers should contain a bias parameter.
        upcast_attention (`bool`, *optional*, defaults to `False`): Whether to upcast the attention computation to float32.
        norm_elementwise_affine (`bool`, *optional*, defaults to `True`): Whether to use learnable elementwise affine parameters in the normalization layers.
        norm_type (`str`, *optional*, defaults to `"layer_norm"`): The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`.
        final_dropout (`bool`, *optional*, defaults to `False`): Whether to apply a final dropout after the feed-forward layer.
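
    Examples:
        A minimal usage sketch. The sizes below (`dim`, head counts, sequence lengths, and the text-embedding
        width) are illustrative assumptions, not values prescribed by this module:

        ```py
        block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40, cross_attention_dim=768)
        hidden_states = torch.randn(2, 4096, 320)        # (batch, tokens, dim)
        encoder_hidden_states = torch.randn(2, 77, 768)  # e.g. text-encoder states for cross-attention
        out = block(hidden_states, encoder_hidden_states=encoder_hidden_states)  # same shape as `hidden_states`
        ```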
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerNormZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is None
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            # TODO (Birch-San): Here we should prepare the encoder_attention mask correctly
            # prepare attention mask here

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states


class FeedForward(nn.Module):
    r"""
    A feed-forward layer.

    Parameters:
        dim (`int`): The number of channels in the input.
        dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        final_dropout (`bool`, *optional*, defaults to `False`): Whether to apply a final dropout.
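
    Examples:
        A minimal usage sketch; the sizes below are illustrative assumptions:

        ```py
        ff = FeedForward(dim=320)                # GEGLU activation with a 4x hidden expansion by default
        hidden_states = torch.randn(2, 77, 320)
        out = ff(hidden_states)                  # same shape as the input
        ```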
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. has a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states


class GELU(nn.Module):
    r"""
    GELU activation function, with optional tanh approximation via `approximate="tanh"`.
    """

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states


class GEGLU(nn.Module):
    r"""
    A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202.

    Parameters:
        dim_in (`int`): The number of channels in the input.
        dim_out (`int`): The number of channels in the output.
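
    Examples:
        A minimal usage sketch; the sizes below are illustrative assumptions:

        ```py
        geglu = GEGLU(dim_in=320, dim_out=1280)
        hidden_states = torch.randn(2, 77, 320)
        out = geglu(hidden_states)  # the 2 * dim_out projection is split into value and gate -> (2, 77, 1280)
        ```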
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)


class ApproximateGELU(nn.Module):
    """
    The approximate form of Gaussian Error Linear Unit (GELU)

    For more details, see section 2: https://arxiv.org/abs/1606.08415
    """

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)


class AdaLayerNorm(nn.Module):
    """
    Norm layer modified to incorporate timestep embeddings.
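
    Examples:
        A minimal, illustrative sketch; the sizes and the scalar timestep index are assumptions, not values used
        elsewhere in this file:

        ```py
        norm = AdaLayerNorm(embedding_dim=320, num_embeddings=1000)
        hidden_states = torch.randn(2, 77, 320)
        out = norm(hidden_states, timestep=torch.tensor(10))  # output keeps the shape of the input
        ```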
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x


class AdaLayerNormZero(nn.Module):
    """
    Norm layer adaptive layer norm zero (adaLN-Zero).
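
    Examples:
        A minimal, illustrative sketch; the sizes, timesteps and class labels below are assumptions:

        ```py
        norm = AdaLayerNormZero(embedding_dim=1152, num_embeddings=1000)
        hidden_states = torch.randn(2, 256, 1152)
        timestep = torch.tensor([10, 500])
        class_labels = torch.tensor([3, 7])
        out, gate_msa, shift_mlp, scale_mlp, gate_mlp = norm(
            hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
        )
        ```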
    """

    def __init__(self, embedding_dim, num_embeddings):
        super().__init__()

        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)

        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp


class AdaGroupNorm(nn.Module):
    """
    GroupNorm layer modified to incorporate timestep embeddings.
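
    Examples:
        A minimal, illustrative sketch; the sizes are assumptions, and `out_dim` must match the channel count of
        the normalized input:

        ```py
        norm = AdaGroupNorm(embedding_dim=1280, out_dim=640, num_groups=32, act_fn="silu")
        sample = torch.randn(2, 640, 32, 32)  # (batch, channels, height, width)
        emb = torch.randn(2, 1280)            # a pooled conditioning embedding
        out = norm(sample, emb)               # same shape as `sample`
        ```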
    """

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps
        self.act = None
        if act_fn == "swish":
            self.act = lambda x: F.silu(x)
        elif act_fn == "mish":
            self.act = nn.Mish()
        elif act_fn == "silu":
            self.act = nn.SiLU()
        elif act_fn == "gelu":
            self.act = nn.GELU()

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x