from inspect import isfunction
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from typing import Optional, Any

from .diffusionmodules.util import checkpoint
from .sub_quadratic_attention import efficient_dot_product_attention

from comfy import model_management
import comfy.ops

if model_management.xformers_enabled():
    import xformers
    import xformers.ops

from comfy.cli_args import args
# CrossAttn precision handling
if args.dont_upcast_attention:
    print("disabling upcasting of attention")
    _ATTN_PRECISION = "fp16"
else:
    _ATTN_PRECISION = "fp32"


def exists(val):
    return val is not None


def uniq(arr):
    return {el: True for el in arr}.keys()


def default(val, d):
    if exists(val):
        return val
    return d


def max_neg_value(t):
    return -torch.finfo(t.dtype).max


def init_(tensor):
    dim = tensor.shape[-1]
    std = 1 / math.sqrt(dim)
    tensor.uniform_(-std, std)
    return tensor


# feedforward
class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out, dtype=None, device=None):
        super().__init__()
        self.proj = comfy.ops.Linear(dim_in, dim_out * 2, dtype=dtype, device=device)

    def forward(self, x):
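        # split the doubled projection in half: one half is the value, the other gates it through GELU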
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = nn.Sequential(
            comfy.ops.Linear(dim, inner_dim, dtype=dtype, device=device),
            nn.GELU()
        ) if not glu else GEGLU(dim, inner_dim, dtype=dtype, device=device)

        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            comfy.ops.Linear(inner_dim, dim_out, dtype=dtype, device=device)
        )

    def forward(self, x):
        return self.net(x)


def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


def Normalize(in_channels, dtype=None, device=None):
    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)


class SpatialSelfAttention(nn.Module):
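    """Single-head self-attention over the spatial positions of a (b, c, h, w)
    feature map, using 1x1 convolutions for the q/k/v and output projections."""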
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

        # compute attention
        b, c, h, w = q.shape
        q = rearrange(q, 'b c h w -> b (h w) c')
        k = rearrange(k, 'b c h w -> b c (h w)')
        w_ = torch.einsum('bij,bjk->bik', q, k)

        w_ = w_ * (int(c)**(-0.5))
        w_ = torch.nn.functional.softmax(w_, dim=2)

        # attend to values
        v = rearrange(v, 'b c h w -> b c (h w)')
        w_ = rearrange(w_, 'b i j -> b j i')
        h_ = torch.einsum('bij,bjk->bik', v, w_)
        h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h)
        h_ = self.proj_out(h_)

        return x+h_


class CrossAttentionBirchSan(nn.Module):
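    """Cross-attention computed with the chunked sub-quadratic attention from
    .sub_quadratic_attention (per the class name, Birch-san's implementation);
    chunk sizes are chosen at runtime from the amount of free memory."""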
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(
            comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, value=None, mask=None):
        h = self.heads

        query = self.to_q(x)
        context = default(context, x)
        key = self.to_k(context)
        if value is not None:
            value = self.to_v(value)
        else:
            value = self.to_v(context)

        del context, x

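        # fold the heads into the batch dimension: (b, t, h*d) -> (b*h, t, d);
        # the key is laid out as (b*h, d, t), already transposed for the q @ k^T matmul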
        query = query.unflatten(-1, (self.heads, -1)).transpose(1,2).flatten(end_dim=1)
        key_t = key.transpose(1,2).unflatten(1, (self.heads, -1)).flatten(end_dim=1)
        del key
        value = value.unflatten(-1, (self.heads, -1)).transpose(1,2).flatten(end_dim=1)

        dtype = query.dtype
        upcast_attention = _ATTN_PRECISION == "fp32" and query.dtype != torch.float32
        if upcast_attention:
            bytes_per_token = torch.finfo(torch.float32).bits // 8
        else:
            bytes_per_token = torch.finfo(query.dtype).bits // 8
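        # estimate the bytes needed for the full (batch_x_heads, q_tokens, k_tokens)
        # score matrix at the precision the matmul will actually run in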
        batch_x_heads, q_tokens, _ = query.shape
        _, _, k_tokens = key_t.shape
        qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens

        mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)

        chunk_threshold_bytes = mem_free_torch * 0.5  # using only this seems to work better on AMD

        kv_chunk_size_min = None

        # not sure at all about the math here
        # TODO: tweak this
        if mem_free_total > 8192 * 1024 * 1024 * 1.3:
            query_chunk_size_x = 1024 * 4
        elif mem_free_total > 4096 * 1024 * 1024 * 1.3:
            query_chunk_size_x = 1024 * 2
        else:
            query_chunk_size_x = 1024
        kv_chunk_size_min_x = None
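        # choose the kv chunk so one query-chunk x kv-chunk score block roughly fits
        # the budget, rounded down to a multiple of 1024 tokens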
        kv_chunk_size_x = (int((chunk_threshold_bytes // (batch_x_heads * bytes_per_token * query_chunk_size_x)) * 2.0) // 1024) * 1024
        if kv_chunk_size_x < 1024:
            kv_chunk_size_x = None

        if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes:
            # the big matmul fits into our memory limit; do everything in 1 chunk,
            # i.e. send it down the unchunked fast-path
            query_chunk_size = q_tokens
            kv_chunk_size = k_tokens
        else:
            query_chunk_size = query_chunk_size_x
            kv_chunk_size = kv_chunk_size_x
            kv_chunk_size_min = kv_chunk_size_min_x

        hidden_states = efficient_dot_product_attention(
            query,
            key_t,
            value,
            query_chunk_size=query_chunk_size,
            kv_chunk_size=kv_chunk_size,
            kv_chunk_size_min=kv_chunk_size_min,
            use_checkpoint=self.training,
            upcast_attention=upcast_attention,
        )

        hidden_states = hidden_states.to(dtype)

        hidden_states = hidden_states.unflatten(0, (-1, self.heads)).transpose(1,2).flatten(start_dim=2)

        out_proj, dropout = self.to_out
        hidden_states = out_proj(hidden_states)
        hidden_states = dropout(hidden_states)

        return hidden_states


class CrossAttentionDoggettx(nn.Module):
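    """Cross-attention using (per the class name) Doggettx's split-attention
    optimization: the query is processed in slices sized from free VRAM, retrying
    with more slices on OOM."""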
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(
            comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, value=None, mask=None):
        h = self.heads

        q_in = self.to_q(x)
        context = default(context, x)
        k_in = self.to_k(context)
        if value is not None:
            v_in = self.to_v(value)
            del value
        else:
            v_in = self.to_v(context)
        del context, x

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
        del q_in, k_in, v_in

        r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)

        mem_free_total = model_management.get_free_memory(q.device)

        gb = 1024 ** 3
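        # rough estimate of the memory needed for the (b*h, q_len, k_len) similarity
        # matrix; the empirical modifier is larger for half precision (element_size 2)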
        tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size()
        modifier = 3 if q.element_size() == 2 else 2.5
        mem_required = tensor_size * modifier
        steps = 1


        if mem_required > mem_free_total:
            steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2)))
            # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
            #      f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")

        if steps > 64:
            max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
            raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
                               f'Need: {mem_required/64/gb:0.1f}GB free, Have: {mem_free_total/gb:0.1f}GB free')

        # print("steps", steps, mem_required, mem_free_total, modifier, q.element_size(), tensor_size)
        first_op_done = False
        cleared_cache = False
        while True:
            try:
                slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
                for i in range(0, q.shape[1], slice_size):
                    end = i + slice_size
                    if _ATTN_PRECISION == "fp32":
                        with torch.autocast(enabled=False, device_type='cuda'):
                            s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * self.scale
                    else:
                        s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * self.scale
                    first_op_done = True

                    s2 = s1.softmax(dim=-1).to(v.dtype)
                    del s1

                    r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
                    del s2
                break
            except model_management.OOM_EXCEPTION as e:
                if not first_op_done:
                    torch.cuda.empty_cache()
                    torch.cuda.ipc_collect()
                    if not cleared_cache:
                        cleared_cache = True
                        print("out of memory error, emptying cache and trying again")
                        continue
                    steps *= 2
                    if steps > 64:
                        raise e
                    print("out of memory error, increasing steps and trying again", steps)
                else:
                    raise e

        del q, k, v

        r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
        del r1

        return self.to_out(r2)

class CrossAttention(nn.Module):
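    """Reference cross-attention: materializes the full attention matrix, so memory
    use grows quadratically with sequence length."""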
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.scale = dim_head ** -0.5
        self.heads = heads

        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(
            comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device),
            nn.Dropout(dropout)
        )

    def forward(self, x, context=None, value=None, mask=None):
        h = self.heads

        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))

        # force cast to fp32 to avoid overflowing
        if _ATTN_PRECISION == "fp32":
            with torch.autocast(enabled=False, device_type='cuda'):
                q, k = q.float(), k.float()
                sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
        else:
            sim = einsum('b i d, b j d -> b i j', q, k) * self.scale

        del q, k

        if exists(mask):
            mask = rearrange(mask, 'b ... -> b (...)')
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)

        # attention, what we cannot get enough of
        sim = sim.softmax(dim=-1)

        out = einsum('b i j, b j d -> b i d', sim, v)
        out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
        return self.to_out(out)

class MemoryEfficientCrossAttention(nn.Module):
    # https://github.com/MatthieuTPHR/diffusers/blob/d80b531ff8060ec1ea982b65a1b8df70f73aa67c/src/diffusers/models/attention.py#L223
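    """Cross-attention computed with xformers.ops.memory_efficient_attention."""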
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.0, dtype=None, device=None):
        super().__init__()
        print(f"Setting up {self.__class__.__name__}. Query dim is {query_dim}, context_dim is {context_dim} and using "
              f"{heads} heads.")
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout))
        self.attention_op: Optional[Any] = None

    def forward(self, x, context=None, value=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        b, _, _ = q.shape
        q, k, v = map(
            lambda t: t.unsqueeze(3)
            .reshape(b, t.shape[1], self.heads, self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b * self.heads, t.shape[1], self.dim_head)
            .contiguous(),
            (q, k, v),
        )

        # actually compute the attention, what we cannot get enough of
        out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)

        if exists(mask):
            raise NotImplementedError
        out = (
            out.unsqueeze(0)
            .reshape(b, self.heads, out.shape[1], self.dim_head)
            .permute(0, 2, 1, 3)
            .reshape(b, out.shape[1], self.heads * self.dim_head)
        )
        return self.to_out(out)

class CrossAttentionPytorch(nn.Module):
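    """Cross-attention computed with torch.nn.functional.scaled_dot_product_attention
    (PyTorch 2.x), which dispatches to fused flash/memory-efficient kernels when
    available."""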
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = comfy.ops.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = comfy.ops.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(comfy.ops.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout))
        self.attention_op: Optional[Any] = None

    def forward(self, x, context=None, value=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        b, _, _ = q.shape
        q, k, v = map(
            lambda t: t.view(b, -1, self.heads, self.dim_head).transpose(1, 2),
            (q, k, v),
        )

        out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False)

        if exists(mask):
            raise NotImplementedError
        out = (
            out.transpose(1, 2).reshape(b, -1, self.heads * self.dim_head)
        )

        return self.to_out(out)

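
# pick the CrossAttention implementation once at import time, based on what the
# environment supports and on the CLI flags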
if model_management.xformers_enabled():
    print("Using xformers cross attention")
    CrossAttention = MemoryEfficientCrossAttention
elif model_management.pytorch_attention_enabled():
    print("Using pytorch cross attention")
    CrossAttention = CrossAttentionPytorch
else:
    if args.use_split_cross_attention:
        print("Using split optimization for cross attention")
        CrossAttention = CrossAttentionDoggettx
    else:
        print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
        CrossAttention = CrossAttentionBirchSan


class BasicTransformerBlock(nn.Module):
    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True,
                 disable_self_attn=False, dtype=None, device=None):
        super().__init__()
        self.disable_self_attn = disable_self_attn
        self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout,
                              context_dim=context_dim if self.disable_self_attn else None, dtype=dtype, device=device)  # is a self-attention if not self.disable_self_attn
        self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device)
        self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim,
                              heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device)  # is self-attn if context is none
        self.norm1 = nn.LayerNorm(dim, dtype=dtype, device=device)
        self.norm2 = nn.LayerNorm(dim, dtype=dtype, device=device)
        self.norm3 = nn.LayerNorm(dim, dtype=dtype, device=device)
        self.checkpoint = checkpoint
        self.n_heads = n_heads
        self.d_head = d_head

    def forward(self, x, context=None, transformer_options={}):
        return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)

    def _forward(self, x, context=None, transformer_options={}):
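        # transformer_options carries ComfyUI patch hooks ("attn1_patch",
        # "attn1_output_patch", "middle_patch", "attn2_patch", ...) plus metadata
        # that is passed through to those hooks via extra_options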
        extra_options = {}
        block = None
        block_index = 0
        if "current_index" in transformer_options:
            extra_options["transformer_index"] = transformer_options["current_index"]
        if "block_index" in transformer_options:
            block_index = transformer_options["block_index"]
            extra_options["block_index"] = block_index
        if "original_shape" in transformer_options:
            extra_options["original_shape"] = transformer_options["original_shape"]
        if "block" in transformer_options:
            block = transformer_options["block"]
            extra_options["block"] = block
        if "patches" in transformer_options:
            transformer_patches = transformer_options["patches"]
        else:
            transformer_patches = {}

        extra_options["n_heads"] = self.n_heads
        extra_options["dim_head"] = self.d_head

        if "patches_replace" in transformer_options:
            transformer_patches_replace = transformer_options["patches_replace"]
        else:
            transformer_patches_replace = {}

        n = self.norm1(x)
        if self.disable_self_attn:
            context_attn1 = context
        else:
            context_attn1 = None
        value_attn1 = None

        if "attn1_patch" in transformer_patches:
            patch = transformer_patches["attn1_patch"]
            if context_attn1 is None:
                context_attn1 = n
            value_attn1 = context_attn1
            for p in patch:
                n, context_attn1, value_attn1 = p(n, context_attn1, value_attn1, extra_options)

        if block is not None:
            transformer_block = (block[0], block[1], block_index)
        else:
            transformer_block = None
        attn1_replace_patch = transformer_patches_replace.get("attn1", {})
        block_attn1 = transformer_block
        if block_attn1 not in attn1_replace_patch:
            block_attn1 = block

        if block_attn1 in attn1_replace_patch:
            if context_attn1 is None:
                context_attn1 = n
                value_attn1 = n
            n = self.attn1.to_q(n)
            context_attn1 = self.attn1.to_k(context_attn1)
            value_attn1 = self.attn1.to_v(value_attn1)
            n = attn1_replace_patch[block_attn1](n, context_attn1, value_attn1, extra_options)
            n = self.attn1.to_out(n)
        else:
            n = self.attn1(n, context=context_attn1, value=value_attn1)

        if "attn1_output_patch" in transformer_patches:
            patch = transformer_patches["attn1_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        if "middle_patch" in transformer_patches:
            patch = transformer_patches["middle_patch"]
            for p in patch:
                x = p(x, extra_options)

        n = self.norm2(x)

        context_attn2 = context
        value_attn2 = None
        if "attn2_patch" in transformer_patches:
            patch = transformer_patches["attn2_patch"]
            value_attn2 = context_attn2
            for p in patch:
                n, context_attn2, value_attn2 = p(n, context_attn2, value_attn2, extra_options)

        attn2_replace_patch = transformer_patches_replace.get("attn2", {})
        block_attn2 = transformer_block
        if block_attn2 not in attn2_replace_patch:
            block_attn2 = block

        if block_attn2 in attn2_replace_patch:
            if value_attn2 is None:
                value_attn2 = context_attn2
            n = self.attn2.to_q(n)
            context_attn2 = self.attn2.to_k(context_attn2)
            value_attn2 = self.attn2.to_v(value_attn2)
            n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options)
            n = self.attn2.to_out(n)
        else:
            n = self.attn2(n, context=context_attn2, value=value_attn2)

        if "attn2_output_patch" in transformer_patches:
            patch = transformer_patches["attn2_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        x = self.ff(self.norm3(x)) + x
        return x


class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding)
    and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape to image
    NEW: use_linear for more efficiency instead of the 1x1 convs
    """
    def __init__(self, in_channels, n_heads, d_head,
                 depth=1, dropout=0., context_dim=None,
                 disable_self_attn=False, use_linear=False,
                 use_checkpoint=True, dtype=None, device=None):
        super().__init__()
        if exists(context_dim) and not isinstance(context_dim, list):
            context_dim = [context_dim] * depth
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = Normalize(in_channels, dtype=dtype, device=device)
        if not use_linear:
            self.proj_in = nn.Conv2d(in_channels,
                                     inner_dim,
                                     kernel_size=1,
                                     stride=1,
                                     padding=0, dtype=dtype, device=device)
        else:
            self.proj_in = comfy.ops.Linear(in_channels, inner_dim, dtype=dtype, device=device)

        self.transformer_blocks = nn.ModuleList(
            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
                                   disable_self_attn=disable_self_attn, checkpoint=use_checkpoint, dtype=dtype, device=device)
                for d in range(depth)]
        )
        if not use_linear:
            self.proj_out = nn.Conv2d(inner_dim, in_channels,
                                      kernel_size=1,
                                      stride=1,
                                      padding=0, dtype=dtype, device=device)
        else:
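            # note: the (in_channels, inner_dim) argument order matches the upstream
            # latent-diffusion code; in the models that use use_linear the two are
            # equal, so the shapes still line up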
            self.proj_out = comfy.ops.Linear(in_channels, inner_dim, dtype=dtype, device=device)
        self.use_linear = use_linear

    def forward(self, x, context=None, transformer_options={}):
        # note: if no context is given, cross-attention defaults to self-attention
        if not isinstance(context, list):
            context = [context] * len(self.transformer_blocks)
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            transformer_options["block_index"] = i
            x = block(x, context=context[i], transformer_options=transformer_options)
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in