from inspect import isfunction
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from typing import Optional, Any

from .diffusionmodules.util import checkpoint
from .sub_quadratic_attention import efficient_dot_product_attention

from comfy import model_management

if model_management.xformers_enabled():
    import xformers
    import xformers.ops

from comfy.cli_args import args
import comfy.ops

# CrossAttn precision handling
if args.dont_upcast_attention:
    print("disabling upcasting of attention")
    _ATTN_PRECISION = "fp16"
else:
    _ATTN_PRECISION = "fp32"
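# _ATTN_PRECISION controls whether the attention score matmul inside the implementations below
# is run in fp32; upcasting avoids fp16 overflow in softmax(q @ k^T) at some extra cost.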


def exists(val):
    return val is not None


def uniq(arr):
    return {el: True for el in arr}.keys()


def default(val, d):
    if exists(val):
        return val
    return d


def max_neg_value(t):
    return -torch.finfo(t.dtype).max


def init_(tensor):
    dim = tensor.shape[-1]
    std = 1 / math.sqrt(dim)
    tensor.uniform_(-std, std)
    return tensor


# feedforward
class GEGLU(nn.Module):
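    # projects to 2 * dim_out and splits the result into a value half and a gate half;
    # the value is multiplied by GELU(gate) ("gated GELU")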
    def __init__(self, dim_in, dim_out, dtype=None, device=None, operations=comfy.ops):
        super().__init__()
        self.proj = operations.Linear(dim_in, dim_out * 2, dtype=dtype, device=device)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
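    # Linear (or GEGLU) expansion by `mult`, dropout, then projection back to dim_out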
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None, operations=comfy.ops):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = nn.Sequential(
            operations.Linear(dim, inner_dim, dtype=dtype, device=device),
            nn.GELU()
        ) if not glu else GEGLU(dim, inner_dim, dtype=dtype, device=device, operations=operations)

        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            operations.Linear(inner_dim, dim_out, dtype=dtype, device=device)
        )

    def forward(self, x):
        return self.net(x)


def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


def Normalize(in_channels, dtype=None, device=None):
    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)


class SpatialSelfAttention(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        b,c,h,w = q.shape
        q = rearrange(q, 'b c h w -> b (h w) c')
        k = rearrange(k, 'b c h w -> b c (h w)')
        w_ = torch.einsum('bij,bjk->bik', q, k)

        w_ = w_ * (int(c)**(-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = rearrange(v, 'b c h w -> b c (h w)')
        w_ = rearrange(w_, 'b i j -> b j i')
        h_ = torch.einsum('bij,bjk->bik', v, w_)
        h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
        h_ = self.proj_out(h_)

        return x+h_
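
# Usage sketch (illustrative only, not part of the original file): SpatialSelfAttention operates
# on NCHW feature maps and returns the input plus an attention residual of the same shape, e.g.
#   sa = SpatialSelfAttention(in_channels=64)
#   y = sa(torch.randn(1, 64, 32, 32))  # -> (1, 64, 32, 32)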


class CrossAttentionBirchSan(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(
            operations.Linear(inner_dim, query_dim, dtype=dtype, device=device),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, value=None, mask=None):
        h = self.heads

        query = self.to_q(x)
        context = default(context, x)
        key = self.to_k(context)
        if value is not None:
            value = self.to_v(value)
        else:
            value = self.to_v(context)

        del context, x

        query = query.unflatten(-1, (self.heads, -1)).transpose(1,2).flatten(end_dim=1)
        key_t = key.transpose(1,2).unflatten(1, (self.heads, -1)).flatten(end_dim=1)
        del key
        value = value.unflatten(-1, (self.heads, -1)).transpose(1,2).flatten(end_dim=1)

        dtype = query.dtype
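        # decide whether the q @ k^T scores will be upcast to fp32 and size the chunking
        # heuristics below from the matching element width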
        upcast_attention = _ATTN_PRECISION == "fp32" and query.dtype != torch.float32
        if upcast_attention:
            bytes_per_token = torch.finfo(torch.float32).bits//8
        else:
            bytes_per_token = torch.finfo(query.dtype).bits//8
        batch_x_heads, q_tokens, _ = query.shape
        _, _, k_tokens = key_t.shape
        qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens

        mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)

        chunk_threshold_bytes = mem_free_torch * 0.5 #Using only this seems to work better on AMD

        kv_chunk_size_min = None

        #not sure at all about the math here
        #TODO: tweak this
        if mem_free_total > 8192 * 1024 * 1024 * 1.3:
            query_chunk_size_x = 1024 * 4
        elif mem_free_total > 4096 * 1024 * 1024 * 1.3:
            query_chunk_size_x = 1024 * 2
        else:
            query_chunk_size_x = 1024
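        # derive the key/value chunk size from the memory budget left per query chunk,
        # rounded down to a multiple of 1024 tokens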
        kv_chunk_size_min_x = None
        kv_chunk_size_x = (int((chunk_threshold_bytes // (batch_x_heads * bytes_per_token * query_chunk_size_x)) * 2.0) // 1024) * 1024
        if kv_chunk_size_x < 1024:
            kv_chunk_size_x = None

        if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes:
            # the big matmul fits into our memory limit; do everything in 1 chunk,
            # i.e. send it down the unchunked fast-path
            query_chunk_size = q_tokens
            kv_chunk_size = k_tokens
        else:
            query_chunk_size = query_chunk_size_x
            kv_chunk_size = kv_chunk_size_x
            kv_chunk_size_min = kv_chunk_size_min_x
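        # at this point the chunk sizes either cover the whole sequence (single-chunk fast path)
        # or define the tiling for the sub-quadratic attention call below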

        hidden_states = efficient_dot_product_attention(
            query,
            key_t,
            value,
            query_chunk_size=query_chunk_size,
            kv_chunk_size=kv_chunk_size,
            kv_chunk_size_min=kv_chunk_size_min,
            use_checkpoint=self.training,
            upcast_attention=upcast_attention,
        )

        hidden_states = hidden_states.to(dtype)

        hidden_states = hidden_states.unflatten(0, (-1, self.heads)).transpose(1,2).flatten(start_dim=2)

        out_proj, dropout = self.to_out
        hidden_states = out_proj(hidden_states)
        hidden_states = dropout(hidden_states)

        return hidden_states


class CrossAttentionDoggettx(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(
            operations.Linear(inner_dim, query_dim, dtype=dtype, device=device),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, value=None, mask=None):
        h = self.heads

        q_in = self.to_q(x)
        context = default(context, x)
        k_in = self.to_k(context)
        if value is not None:
            v_in = self.to_v(value)
            del value
        else:
            v_in = self.to_v(context)
        del context, x

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
        del q_in, k_in, v_in

        r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)

        mem_free_total = model_management.get_free_memory(q.device)

        gb = 1024 ** 3
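        # estimate the bytes needed for the full (batch*heads, q_tokens, k_tokens) score matrix,
        # padded by a safety factor; if it does not fit, slice the query dimension into 2^n steps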
        tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size()
        modifier = 3 if q.element_size() == 2 else 2.5
        mem_required = tensor_size * modifier
        steps = 1


        if mem_required > mem_free_total:
            steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2)))
            # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
            #      f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")

        if steps > 64:
            max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
            raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
                               f'Need: {mem_required/64/gb:0.1f}GB free, Have:{mem_free_total/gb:0.1f}GB free')

        # print("steps", steps, mem_required, mem_free_total, modifier, q.element_size(), tensor_size)
        first_op_done = False
        cleared_cache = False
        while True:
            try:
                slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
                for i in range(0, q.shape[1], slice_size):
                    end = i + slice_size
                    if _ATTN_PRECISION == "fp32":
                        with torch.autocast(enabled=False, device_type='cuda'):
                            s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * self.scale
                    else:
                        s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * self.scale
                    first_op_done = True

                    s2 = s1.softmax(dim=-1).to(v.dtype)
                    del s1

                    r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
                    del s2
                break
            except model_management.OOM_EXCEPTION as e:
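                # only retry if no slice has been computed yet: first free cached memory and
                # retry as-is, then keep doubling the number of query slices up to 64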
                if not first_op_done:
                    model_management.soft_empty_cache(True)
                    if not cleared_cache:
                        cleared_cache = True
                        print("out of memory error, emptying cache and trying again")
                        continue
                    steps *= 2
                    if steps > 64:
                        raise e
                    print("out of memory error, increasing steps and trying again", steps)
                else:
                    raise e

        del q, k, v

        r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
        del r1

        return self.to_out(r2)

class CrossAttention(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(
            operations.Linear(inner_dim, query_dim, dtype=dtype, device=device),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, value=None, mask=None):
        h = self.heads

        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        # force cast to fp32 to avoid overflowing
        if _ATTN_PRECISION == "fp32":
            with torch.autocast(enabled=False, device_type='cuda'):
                q, k = q.float(), k.float()
                sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
        else:
            sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

        del q, k

        if exists(mask):
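            # mask arrives as (batch, keys); broadcast it over heads and query positions and
            # push masked logits to the most negative representable value before softmax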
            mask = rearrange(mask, 'b ... -> b (...)')
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        sim = sim.softmax(dim=-1)

        out = einsum('b i j, b j d -> b i d', sim, v)
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out)

class MemoryEfficientCrossAttention(nn.Module):
    # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, dtype=None, device=None, operations=comfy.ops):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout))
        self.attention_op: Optional[Any] = None

    def forward(self, x, context=None, value=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        b, _, _ = q.shape
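        # fold the heads into the batch dimension: (b, n, h*d) -> (b*h, n, d), the layout
        # expected by xformers' memory_efficient_attention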
        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(b, t.shape[1], self.heads, self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b * self.heads, t.shape[1], self.dim_head)
            .contiguous(),
            (q, k, v),
        )

        # actually compute the attention, what we cannot get enough of
        out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)

        if exists(mask):
            raise NotImplementedError
        out = (
            out.unsqueeze(0)
            .reshape(b, self.heads, out.shape[1], self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b, out.shape[1], self.heads * self.dim_head)
        )
        return self.to_out(out)

class CrossAttentionPytorch(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout))
        self.attention_op: Optional[Any] = None

    def forward(self, x, context=None, value=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        b, _, _ = q.shape
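        # reshape to (batch, heads, tokens, dim_head) for F.scaled_dot_product_attention,
        # which applies the 1/sqrt(dim_head) scaling internally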
        q, k, v = map(
            lambda t: t.view(b, -1, self.heads, self.dim_head).transpose(1, 2),
            (q, k, v),
        )

        out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)

        if exists(mask):
            raise NotImplementedError
        out = (
            out.transpose(1, 2).reshape(b, -1, self.heads * self.dim_head)
        )

        return self.to_out(out)

if model_management.xformers_enabled():
    print("Using xformers cross attention")
    CrossAttention = MemoryEfficientCrossAttention
elif model_management.pytorch_attention_enabled():
    print("Using pytorch cross attention")
    CrossAttention = CrossAttentionPytorch
else:
    if args.use_split_cross_attention:
        print("Using split optimization for cross attention")
        CrossAttention = CrossAttentionDoggettx
    else:
        print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
        CrossAttention = CrossAttentionBirchSan
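
# Minimal usage sketch (illustrative only; the dimensions are assumptions, not part of this file).
# Every implementation above shares the same constructor and forward signature, so the alias
# selected here can be used interchangeably:
#   attn = CrossAttention(query_dim=320, context_dim=768, heads=8, dim_head=40)
#   x = torch.randn(2, 4096, 320)    # (batch, query tokens, query_dim)
#   ctx = torch.randn(2, 77, 768)    # (batch, context tokens, context_dim)
#   out = attn(x, context=ctx)       # -> (2, 4096, 320)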


class BasicTransformerBlock(nn.Module):
    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True,
                 disable_self_attn=False, dtype=None, device=None, operations=comfy.ops):
        super().__init__()
        self.disable_self_attn = disable_self_attn
        self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout,
                              context_dim=context_dim if self.disable_self_attn else None, dtype=dtype, device=device, operations=operations)  # is a self-attention if not self.disable_self_attn
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations)
        self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
                              heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device, operations=operations)  # is self-attn if context is none
        self.norm1 = nn.LayerNorm(dim, dtype=dtype, device=device)
        self.norm2 = nn.LayerNorm(dim, dtype=dtype, device=device)
        self.norm3 = nn.LayerNorm(dim, dtype=dtype, device=device)
        self.checkpoint = checkpoint
        self.n_heads = n_heads
        self.d_head = d_head

    def forward(self, x, context=None, transformer_options={}):
        return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)

    def _forward(self, x, context=None, transformer_options={}):
        extra_options = {}
        block = None
        block_index = 0
        if "current_index" in transformer_options:
            extra_options["transformer_index"] = transformer_options["current_index"]
        if "block_index" in transformer_options:
            block_index = transformer_options["block_index"]
            extra_options["block_index"] = block_index
        if "original_shape" in transformer_options:
            extra_options["original_shape"] = transformer_options["original_shape"]
        if "block" in transformer_options:
            block = transformer_options["block"]
            extra_options["block"] = block
        if "cond_or_uncond" in transformer_options:
            extra_options["cond_or_uncond"] = transformer_options["cond_or_uncond"]
        if "patches" in transformer_options:
            transformer_patches = transformer_options["patches"]
        else:
            transformer_patches = {}

        extra_options["n_heads"] = self.n_heads
        extra_options["dim_head"] = self.d_head

        if "patches_replace" in transformer_options:
            transformer_patches_replace = transformer_options["patches_replace"]
        else:
            transformer_patches_replace = {}

        n = self.norm1(x)
        if self.disable_self_attn:
            context_attn1 = context
        else:
            context_attn1 = None
        value_attn1 = None

        if "attn1_patch" in transformer_patches:
            patch = transformer_patches["attn1_patch"]
            if context_attn1 is None:
                context_attn1 = n
            value_attn1 = context_attn1
            for p in patch:
                n, context_attn1, value_attn1 = p(n, context_attn1, value_attn1, extra_options)

        if block is not None:
            transformer_block = (block[0], block[1], block_index)
        else:
            transformer_block = None
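        # replacement patches are looked up first by (block, block_index), then by block alone;
        # a match receives the projected q/k/v and replaces the stock attn1 call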
        attn1_replace_patch = transformer_patches_replace.get("attn1", {})
        block_attn1 = transformer_block
        if block_attn1 not in attn1_replace_patch:
            block_attn1 = block

        if block_attn1 in attn1_replace_patch:
            if context_attn1 is None:
                context_attn1 = n
                value_attn1 = n
            n = self.attn1.to_q(n)
            context_attn1 = self.attn1.to_k(context_attn1)
            value_attn1 = self.attn1.to_v(value_attn1)
            n = attn1_replace_patch[block_attn1](n, context_attn1, value_attn1, extra_options)
            n = self.attn1.to_out(n)
        else:
            n = self.attn1(n, context=context_attn1, value=value_attn1)

        if "attn1_output_patch" in transformer_patches:
            patch = transformer_patches["attn1_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        if "middle_patch" in transformer_patches:
            patch = transformer_patches["middle_patch"]
            for p in patch:
                x = p(x, extra_options)

        n = self.norm2(x)
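        # second attention: cross-attention against the provided context (self-attention when
        # context is None); attn2 patches mirror the attn1 patch hooks above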

        context_attn2 = context
        value_attn2 = None
        if "attn2_patch" in transformer_patches:
            patch = transformer_patches["attn2_patch"]
            value_attn2 = context_attn2
            for p in patch:
                n, context_attn2, value_attn2 = p(n, context_attn2, value_attn2, extra_options)

        attn2_replace_patch = transformer_patches_replace.get("attn2", {})
        block_attn2 = transformer_block
        if block_attn2 not in attn2_replace_patch:
            block_attn2 = block

        if block_attn2 in attn2_replace_patch:
            if value_attn2 is None:
                value_attn2 = context_attn2
            n = self.attn2.to_q(n)
            context_attn2 = self.attn2.to_k(context_attn2)
            value_attn2 = self.attn2.to_v(value_attn2)
            n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options)
            n = self.attn2.to_out(n)
        else:
            n = self.attn2(n, context=context_attn2, value=value_attn2)

        if "attn2_output_patch" in transformer_patches:
            patch = transformer_patches["attn2_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        x = self.ff(self.norm3(x)) + x
        return x


class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding) and reshape to b, t, d.
    Then apply standard transformer blocks.
    Finally, reshape back to an image.
    NEW: use_linear replaces the 1x1 convs with linear layers for more efficiency.
    """
    def __init__(self, in_channels, n_heads, d_head,
                 depth=1, dropout=0., context_dim=None,
                 disable_self_attn=False, use_linear=False,
                 use_checkpoint=True, dtype=None, device=None, operations=comfy.ops):
        super().__init__()
        if exists(context_dim) and not isinstance(context_dim, list):
            context_dim = [context_dim] * depth
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = Normalize(in_channels, dtype=dtype, device=device)
        if not use_linear:
            self.proj_in = operations.Conv2d(in_channels,
                                     inner_dim,
                                     kernel_size=1,
                                     stride=1,
                                     padding=0, dtype=dtype, device=device)
        else:
            self.proj_in = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device)

        self.transformer_blocks = nn.ModuleList(
            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
                                   disable_self_attn=disable_self_attn, checkpoint=use_checkpoint, dtype=dtype, device=device, operations=operations)
                for d in range(depth)]
        )
        if not use_linear:
            self.proj_out = operations.Conv2d(inner_dim,in_channels,
                                                  kernel_size=1,
                                                  stride=1,
                                                  padding=0, dtype=dtype, device=device)
        else:
            self.proj_out = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device)
        self.use_linear = use_linear

    def forward(self, x, context=None, transformer_options={}):
        # note: if no context is given, cross-attention defaults to self-attention
        if not isinstance(context, list):
            context = [context] * len(self.transformer_blocks)
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            transformer_options["block_index"] = i
            x = block(x, context=context[i], transformer_options=transformer_options)
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in
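
# Usage sketch (illustrative only; the channel/head/token counts are assumptions):
#   st = SpatialTransformer(in_channels=320, n_heads=8, d_head=40, depth=1,
#                           context_dim=768, use_linear=True)
#   feat = torch.randn(1, 320, 64, 64)   # NCHW feature map
#   cond = torch.randn(1, 77, 768)       # conditioning tokens, e.g. a text embedding
#   out = st(feat, context=cond)         # -> (1, 320, 64, 64), residual added to the input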