from inspect import isfunction
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from typing import Optional, Any

from .diffusionmodules.util import checkpoint
from .sub_quadratic_attention import efficient_dot_product_attention

from comfy import model_management
import comfy.ops

from . import tomesd

if model_management.xformers_enabled():
    import xformers
    import xformers.ops

# CrossAttn precision handling
import os
_ATTN_PRECISION = os.environ.get("ATTN_PRECISION", "fp32")

from comfy.cli_args import args

def exists(val):
    return val is not None


def uniq(arr):
    return {el: True for el in arr}.keys()


def default(val, d):
    if exists(val):
        return val
    return d() if isfunction(d) else d


def max_neg_value(t):
    return -torch.finfo(t.dtype).max


def init_(tensor):
    dim = tensor.shape[-1]
    std = 1 / math.sqrt(dim)
    tensor.uniform_(-std, std)
    return tensor


# feedforward
class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out, dtype=None):
        super().__init__()
        self.proj = comfy.ops.Linear(dim_in, dim_out * 2, dtype=dtype)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)
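
# Usage sketch (hypothetical dims): GEGLU projects to twice the output width and
# gates one half with GELU of the other half:
#
#   geglu = GEGLU(dim_in=320, dim_out=1280)
#   y = geglu(torch.randn(1, 77, 320))   # -> (1, 77, 1280)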


class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = nn.Sequential(
            comfy.ops.Linear(dim, inner_dim, dtype=dtype),
            nn.GELU()
        ) if not glu else GEGLU(dim, inner_dim, dtype=dtype)

        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            comfy.ops.Linear(inner_dim, dim_out, dtype=dtype)
        )

    def forward(self, x):
        return self.net(x)


def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


def Normalize(in_channels, dtype=None):
    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype)


class SpatialSelfAttention(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        b,c,h,w = q.shape
        q = rearrange(q, 'b c h w -> b (h w) c')
        k = rearrange(k, 'b c h w -> b c (h w)')
        w_ = torch.einsum('bij,bjk->bik', q, k)

        w_ = w_ * (int(c)**(-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = rearrange(v, 'b c h w -> b c (h w)')
        w_ = rearrange(w_, 'b i j -> b j i')
        h_ = torch.einsum('bij,bjk->bik', v, w_)
        h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
        h_ = self.proj_out(h_)

        return x+h_
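
# Shape walk-through for SpatialSelfAttention.forward (reference notes, not executed):
#   q: (b, h*w, c) and k: (b, c, h*w) give scores w_: (b, h*w, h*w), scaled by c**-0.5
#   and softmaxed over the key axis; v: (b, c, h*w) times the transposed w_ yields
#   (b, c, h*w), which is reshaped to (b, c, h, w), projected, and added residually.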


class CrossAttentionBirchSan(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype)
        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)

        self.to_out = nn.Sequential(
            comfy.ops.Linear(inner_dim, query_dim, dtype=dtype),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, value=None, mask=None):
        h = self.heads

        query = self.to_q(x)
        context = default(context, x)
        key = self.to_k(context)
        if value is not None:
            value = self.to_v(value)
        else:
            value = self.to_v(context)

        del context, x

        query = query.unflatten(-1, (self.heads, -1)).transpose(1,2).flatten(end_dim=1)
        key_t = key.transpose(1,2).unflatten(1, (self.heads, -1)).flatten(end_dim=1)
        del key
        value = value.unflatten(-1, (self.heads, -1)).transpose(1,2).flatten(end_dim=1)

        dtype = query.dtype
        upcast_attention = _ATTN_PRECISION == "fp32" and query.dtype != torch.float32
        if upcast_attention:
            bytes_per_token = torch.finfo(torch.float32).bits//8
        else:
            bytes_per_token = torch.finfo(query.dtype).bits//8
        batch_x_heads, q_tokens, _ = query.shape
        _, _, k_tokens = key_t.shape
        qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens

        mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)

        chunk_threshold_bytes = mem_free_torch * 0.5 #Using only this seems to work better on AMD

        kv_chunk_size_min = None

        #not sure at all about the math here
        #TODO: tweak this
        if mem_free_total > 8192 * 1024 * 1024 * 1.3:
            query_chunk_size_x = 1024 * 4
        elif mem_free_total > 4096 * 1024 * 1024 * 1.3:
            query_chunk_size_x = 1024 * 2
        else:
            query_chunk_size_x = 1024
        kv_chunk_size_min_x = None
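        # size the kv chunk so one (query_chunk x kv_chunk) score slice stays on the order
        # of the free torch memory (the 2.0 factor undoes the 0.5 threshold above),
        # rounded down to a multiple of 1024 tokens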
        kv_chunk_size_x = (int((chunk_threshold_bytes // (batch_x_heads * bytes_per_token * query_chunk_size_x)) * 2.0) // 1024) * 1024
        if kv_chunk_size_x < 1024:
            kv_chunk_size_x = None

        if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes:
            # the big matmul fits into our memory limit; do everything in 1 chunk,
            # i.e. send it down the unchunked fast-path
            query_chunk_size = q_tokens
            kv_chunk_size = k_tokens
        else:
            query_chunk_size = query_chunk_size_x
            kv_chunk_size = kv_chunk_size_x
            kv_chunk_size_min = kv_chunk_size_min_x

        hidden_states = efficient_dot_product_attention(
            query,
            key_t,
            value,
            query_chunk_size=query_chunk_size,
            kv_chunk_size=kv_chunk_size,
            kv_chunk_size_min=kv_chunk_size_min,
            use_checkpoint=self.training,
            upcast_attention=upcast_attention,
        )

        hidden_states = hidden_states.to(dtype)

        hidden_states = hidden_states.unflatten(0, (-1, self.heads)).transpose(1,2).flatten(start_dim=2)

        out_proj, dropout = self.to_out
        hidden_states = out_proj(hidden_states)
        hidden_states = dropout(hidden_states)

        return hidden_states


class CrossAttentionDoggettx(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype)
        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)

        self.to_out = nn.Sequential(
            comfy.ops.Linear(inner_dim, query_dim, dtype=dtype),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, value=None, mask=None):
        h = self.heads

        q_in = self.to_q(x)
        context = default(context, x)
        k_in = self.to_k(context)
        if value is not None:
            v_in = self.to_v(value)
            del value
        else:
            v_in = self.to_v(context)
        del context, x

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
        del q_in, k_in, v_in

        r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device)

        mem_free_total = model_management.get_free_memory(q.device)

        gb = 1024 ** 3
        tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size()
        modifier = 3 if q.element_size() == 2 else 2.5
        mem_required = tensor_size * modifier
        steps = 1


        if mem_required > mem_free_total:
            steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2)))
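            # round the required/free ratio up to the next power of two so each slice of
            # the attention score matrix fits within the free-memory estimate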
            # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
            #      f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")

        if steps > 64:
            max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
            raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
                               f'Need: {mem_required/64/gb:0.1f}GB free, Have:{mem_free_total/gb:0.1f}GB free')

        # print("steps", steps, mem_required, mem_free_total, modifier, q.element_size(), tensor_size)
        first_op_done = False
        cleared_cache = False
        while True:
            try:
                slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
                for i in range(0, q.shape[1], slice_size):
                    end = i + slice_size
                    if _ATTN_PRECISION == "fp32":
                        with torch.autocast(enabled=False, device_type='cuda'):
                            s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * self.scale
                    else:
                        s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * self.scale
                    first_op_done = True

                    s2 = s1.softmax(dim=-1)
                    del s1

                    r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
                    del s2
                break
            except model_management.OOM_EXCEPTION as e:
                if not first_op_done:
                    torch.cuda.empty_cache()
                    torch.cuda.ipc_collect()
                    if not cleared_cache:
                        cleared_cache = True
                        print("out of memory error, emptying cache and trying again")
                        continue
                    steps *= 2
                    if steps > 64:
                        raise e
                    print("out of memory error, increasing steps and trying again", steps)
                else:
                    raise e

        del q, k, v

        r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
        del r1

        return self.to_out(r2)

class CrossAttention(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype)
        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)

        self.to_out = nn.Sequential(
            comfy.ops.Linear(inner_dim, query_dim, dtype=dtype),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, value=None, mask=None):
        h = self.heads

        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        # force cast to fp32 to avoid overflowing
        if _ATTN_PRECISION == "fp32":
            with torch.autocast(enabled=False, device_type='cuda'):
                q, k = q.float(), k.float()
                sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
        else:
            sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

        del q, k

        if exists(mask):
            mask = rearrange(mask, 'b ... -> b (...)')
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        sim = sim.softmax(dim=-1)

        out = einsum('b i j, b j d -> b i d', sim, v)
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out)
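
# Minimal usage sketch for the einsum-based CrossAttention above (hypothetical shapes,
# assuming an SD-style text-to-image setup):
#
#   attn = CrossAttention(query_dim=320, context_dim=768, heads=8, dim_head=40)
#   x = torch.randn(2, 4096, 320)    # image tokens: (batch, h*w, query_dim)
#   ctx = torch.randn(2, 77, 768)    # text tokens:  (batch, seq_len, context_dim)
#   out = attn(x, context=ctx)       # -> (2, 4096, 320); context=None gives self-attention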

class MemoryEfficientCrossAttention(nn.Module):
    # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, dtype=None):
        super().__init__()
        print(f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
              f"{heads} heads.")
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype)
        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)

        self.to_out = nn.Sequential(comfy.ops.Linear(inner_dim, query_dim, dtype=dtype), nn.Dropout(dropout))
        self.attention_op: Optional[Any] = None

    def forward(self, x, context=None, value=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        b, _, _ = q.shape
        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(b, t.shape[1], self.heads, self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b * self.heads, t.shape[1], self.dim_head)
            .contiguous(),
            (q, k, v),
        )
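        # the reshape above is equivalent to rearrange(t, 'b n (h d) -> (b h) n d') and
        # gives xformers contiguous (batch*heads, tokens, dim_head) tensors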

        # actually compute the attention, what we cannot get enough of
        out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)

        if exists(mask):
            raise NotImplementedError
        out = (
            out.unsqueeze(0)
            .reshape(b, self.heads, out.shape[1], self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b, out.shape[1], self.heads * self.dim_head)
        )
        return self.to_out(out)

class CrossAttentionPytorch(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype)
        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)
        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype)

        self.to_out = nn.Sequential(comfy.ops.Linear(inner_dim, query_dim, dtype=dtype), nn.Dropout(dropout))
        self.attention_op: Optional[Any] = None

    def forward(self, x, context=None, value=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        b, _, _ = q.shape
        q, k, v = map(
            lambda t: t.view(b, -1, self.heads, self.dim_head).transpose(1, 2),
            (q, k, v),
        )

        out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)
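        # scaled_dot_product_attention takes (batch, heads, tokens, dim_head) tensors and
        # applies the 1/sqrt(dim_head) scaling internally, so no explicit scale is needed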

        if exists(mask):
            raise NotImplementedError
        out = (
            out.transpose(1, 2).reshape(b, -1, self.heads * self.dim_head)
        )

        return self.to_out(out)

if model_management.xformers_enabled():
    print("Using xformers cross attention")
    CrossAttention = MemoryEfficientCrossAttention
elif model_management.pytorch_attention_enabled():
    print("Using pytorch cross attention")
    CrossAttention = CrossAttentionPytorch
else:
    if args.use_split_cross_attention:
        print("Using split optimization for cross attention")
        CrossAttention = CrossAttentionDoggettx
    else:
        print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
        CrossAttention = CrossAttentionBirchSan


class BasicTransformerBlock(nn.Module):
    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True,
                 disable_self_attn=False, dtype=None):
        super().__init__()
        self.disable_self_attn = disable_self_attn
        self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout,
                              context_dim=context_dim if self.disable_self_attn else None, dtype=dtype)  # is a self-attention if not self.disable_self_attn
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff, dtype=dtype)
        self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
                              heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype)  # is self-attn if context is none
        self.norm1 = nn.LayerNorm(dim, dtype=dtype)
        self.norm2 = nn.LayerNorm(dim, dtype=dtype)
        self.norm3 = nn.LayerNorm(dim, dtype=dtype)
        self.checkpoint = checkpoint

    def forward(self, x, context=None, transformer_options={}):
        return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)

    def _forward(self, x, context=None, transformer_options={}):
        current_index = None
        if "current_index" in transformer_options:
            current_index = transformer_options["current_index"]
        if "patches" in transformer_options:
            transformer_patches = transformer_options["patches"]
        else:
            transformer_patches = {}

        n = self.norm1(x)
        if self.disable_self_attn:
            context_attn1 = context
        else:
            context_attn1 = None
        value_attn1 = None

        if "attn1_patch" in transformer_patches:
            patch = transformer_patches["attn1_patch"]
            if context_attn1 is None:
                context_attn1 = n
            value_attn1 = context_attn1
            for p in patch:
                n, context_attn1, value_attn1 = p(current_index, n, context_attn1, value_attn1)

        if "tomesd" in transformer_options:
            m, u = tomesd.get_functions(x, transformer_options["tomesd"]["ratio"], transformer_options["original_shape"])
            n = u(self.attn1(m(n), context=context_attn1, value=value_attn1))
        else:
            n = self.attn1(n, context=context_attn1, value=value_attn1)

        x += n
        if "middle_patch" in transformer_patches:
            patch = transformer_patches["middle_patch"]
            for p in patch:
                x = p(current_index, x)

        n = self.norm2(x)

        context_attn2 = context
        value_attn2 = None
        if "attn2_patch" in transformer_patches:
            patch = transformer_patches["attn2_patch"]
            value_attn2 = context_attn2
            for p in patch:
                n, context_attn2, value_attn2 = p(current_index, n, context_attn2, value_attn2)

        n = self.attn2(n, context=context_attn2, value=value_attn2)

        x += n
        x = self.ff(self.norm3(x)) + x
        return x
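
# Sketch of the patch interface consumed above (hypothetical example, not part of this
# module): entries under transformer_options["patches"]["attn1_patch"] / ["attn2_patch"]
# are lists of callables that receive and return the (hidden, context, value) triple,
# while "middle_patch" callables map (current_index, x) -> x between attn1 and attn2:
#
#   def my_attn1_patch(current_index, n, context_attn1, value_attn1):
#       # e.g. inspect or tweak the attention inputs for the block at current_index
#       return n, context_attn1, value_attn1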


class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding)
    and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape to image
    NEW: use_linear for more efficiency instead of the 1x1 convs
    """
    def __init__(self, in_channels, n_heads, d_head,
                 depth=1, dropout=0., context_dim=None,
                 disable_self_attn=False, use_linear=False,
                 use_checkpoint=True, dtype=None):
        super().__init__()
        if exists(context_dim) and not isinstance(context_dim, list):
            context_dim = [context_dim]
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = Normalize(in_channels, dtype=dtype)
        if not use_linear:
            self.proj_in = nn.Conv2d(in_channels,
                                     inner_dim,
                                     kernel_size=1,
                                     stride=1,
                                     padding=0, dtype=dtype)
        else:
            self.proj_in = comfy.ops.Linear(in_channels, inner_dim, dtype=dtype)

        self.transformer_blocks = nn.ModuleList(
            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
                                   disable_self_attn=disable_self_attn, checkpoint=use_checkpoint, dtype=dtype)
                for d in range(depth)]
        )
        if not use_linear:
            self.proj_out = nn.Conv2d(inner_dim,in_channels,
                                                  kernel_size=1,
                                                  stride=1,
                                                  padding=0, dtype=dtype)
        else:
            self.proj_out = comfy.ops.Linear(in_channels, inner_dim, dtype=dtype)
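            # note: the (in_channels, inner_dim) argument order mirrors the upstream ldm
            # implementation; with use_linear these two dimensions must match for this
            # projection to accept the inner_dim-sized tokens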
        self.use_linear = use_linear

    def forward(self, x, context=None, transformer_options={}):
        # note: if no context is given, cross-attention defaults to self-attention
        if not isinstance(context, list):
            context = [context]
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            x = block(x, context=context[i], transformer_options=transformer_options)
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in
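

# Minimal usage sketch for SpatialTransformer (hypothetical dims, mirroring a typical
# SD-style block; kept as a comment so nothing runs on import):
#
#   st = SpatialTransformer(in_channels=320, n_heads=8, d_head=40,
#                           depth=1, context_dim=768, use_linear=True)
#   x = torch.randn(1, 320, 64, 64)   # feature map: (b, c, h, w)
#   ctx = torch.randn(1, 77, 768)     # conditioning tokens
#   y = st(x, context=ctx)            # -> (1, 320, 64, 64), residual already added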