"PTDN/test_inference_serving.sh" did not exist on "89c9f363e6c761596cc0d8185fe2c90e7b9a1d9d"
attention.py 28.2 KB
Newer Older
comfyanonymous's avatar
comfyanonymous committed
1
2
3
4
5
6
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from typing import Optional, Any

from .diffusionmodules.util import checkpoint, AlphaBlender, timestep_embedding
from .sub_quadratic_attention import efficient_dot_product_attention

from comfy import model_management

if model_management.xformers_enabled():
    import xformers
    import xformers.ops

from comfy.cli_args import args
import comfy.ops
ops = comfy.ops.disable_weight_init

# CrossAttn precision handling
if args.dont_upcast_attention:
    print("disabling upcasting of attention")
    _ATTN_PRECISION = "fp16"
else:
    _ATTN_PRECISION = "fp32"


def exists(val):
    return val is not None


def uniq(arr):
    return {el: True for el in arr}.keys()


def default(val, d):
    if exists(val):
        return val
    return d


def max_neg_value(t):
    return -torch.finfo(t.dtype).max


def init_(tensor):
    dim = tensor.shape[-1]
    std = 1 / math.sqrt(dim)
    tensor.uniform_(-std, std)
    return tensor


# feedforward
class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out, dtype=None, device=None, operations=ops):
        super().__init__()
        self.proj = operations.Linear(dim_in, dim_out * 2, dtype=dtype, device=device)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None, operations=ops):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = nn.Sequential(
            operations.Linear(dim, inner_dim, dtype=dtype, device=device),
            nn.GELU()
        ) if not glu else GEGLU(dim, inner_dim, dtype=dtype, device=device, operations=operations)

        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            operations.Linear(inner_dim, dim_out, dtype=dtype, device=device)
        )

    def forward(self, x):
        return self.net(x)

def Normalize(in_channels, dtype=None, device=None):
    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)
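
# All of the attention_* implementations below share the same interface: q, k and v
# arrive as (batch, tokens, heads * dim_head) tensors, heads are split out, scaled
# dot-product attention is computed per head, and the result is merged back to a
# (batch, q_tokens, heads * dim_head) tensor.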

def attention_basic(q, k, v, heads, mask=None):
    b, _, dim_head = q.shape
    dim_head //= heads
    scale = dim_head ** -0.5

    h = heads
    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    # force cast to fp32 to avoid overflowing
    if _ATTN_PRECISION =="fp32":
        sim = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale
    else:
        sim = einsum('b i d, b j d -> b i j', q, k) * scale

    del q, k

    if exists(mask):
        if mask.dtype == torch.bool:
            mask = rearrange(mask, 'b ... -> b (...)') #TODO: check if this bool part matches pytorch attention
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)
        else:
            if len(mask.shape) == 2:
                bs = 1
            else:
                bs = mask.shape[0]
            mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(b, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])
            sim.add_(mask)

    # attention, what we cannot get enough of
    sim = sim.softmax(dim=-1)

    out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v)
    out = (
        out.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return out


def attention_sub_quad(query, key, value, heads, mask=None):
    b, _, dim_head = query.shape
    dim_head //= heads

    scale = dim_head ** -0.5
    query = query.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)
    value = value.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)

    key = key.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 3, 1).reshape(b * heads, dim_head, -1)

    dtype = query.dtype
    upcast_attention = _ATTN_PRECISION == "fp32" and query.dtype != torch.float32
    if upcast_attention:
        bytes_per_token = torch.finfo(torch.float32).bits//8
    else:
        bytes_per_token = torch.finfo(query.dtype).bits//8
    batch_x_heads, q_tokens, _ = query.shape
    _, _, k_tokens = key.shape
    qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens

    mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)

    kv_chunk_size_min = None
    kv_chunk_size = None
    query_chunk_size = None
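    # Choose the largest query chunk such that one (batch*heads, chunk, k_tokens) attention
    # slice fits in free memory with a 4x safety margin; when the whole key length fits,
    # no key/value chunking is used, otherwise the code falls back to 512-query chunks below.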

    for x in [4096, 2048, 1024, 512, 256]:
        count = mem_free_total / (batch_x_heads * bytes_per_token * x * 4.0)
        if count >= k_tokens:
            kv_chunk_size = k_tokens
            query_chunk_size = x
            break

    if query_chunk_size is None:
        query_chunk_size = 512

    if mask is not None:
        if len(mask.shape) == 2:
            bs = 1
        else:
            bs = mask.shape[0]
        mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(b, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])

    hidden_states = efficient_dot_product_attention(
        query,
        key,
        value,
        query_chunk_size=query_chunk_size,
        kv_chunk_size=kv_chunk_size,
        kv_chunk_size_min=kv_chunk_size_min,
        use_checkpoint=False,
        upcast_attention=upcast_attention,
        mask=mask,
    )

    hidden_states = hidden_states.to(dtype)

    hidden_states = hidden_states.unflatten(0, (-1, heads)).transpose(1,2).flatten(start_dim=2)
    return hidden_states

def attention_split(q, k, v, heads, mask=None):
    b, _, dim_head = q.shape
    dim_head //= heads
    scale = dim_head ** -0.5

    h = heads
    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)

    mem_free_total = model_management.get_free_memory(q.device)

    if _ATTN_PRECISION =="fp32":
        element_size = 4
    else:
        element_size = q.element_size()

    gb = 1024 ** 3
    tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * element_size
    modifier = 3
    mem_required = tensor_size * modifier
    steps = 1


    if mem_required > mem_free_total:
        steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2)))
        # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
        #      f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")

    if steps > 64:
        max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
        raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
                            f'Need: {mem_required/64/gb:0.1f}GB free, Have:{mem_free_total/gb:0.1f}GB free')

    if mask is not None:
        if len(mask.shape) == 2:
            bs = 1
        else:
            bs = mask.shape[0]
        mask = mask.reshape(bs, -1, mask.shape[-2], mask.shape[-1]).expand(b, heads, -1, -1).reshape(-1, mask.shape[-2], mask.shape[-1])

    # print("steps", steps, mem_required, mem_free_total, modifier, q.element_size(), tensor_size)
    first_op_done = False
    cleared_cache = False
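    # Compute the attention in slices along the query dimension so the full
    # (q_tokens x k_tokens) similarity matrix never has to be materialized at once.
    # If an OOM happens before the first slice completes, the cache is emptied once and
    # then the number of slices is doubled (up to a limit) before giving up.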
    while True:
        try:
            slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
            for i in range(0, q.shape[1], slice_size):
                end = i + slice_size
                if _ATTN_PRECISION =="fp32":
                    with torch.autocast(enabled=False, device_type = 'cuda'):
                        s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * scale
                else:
                    s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale

                if mask is not None:
                    if len(mask.shape) == 2:
                        s1 += mask[i:end]
                    else:
                        s1 += mask[:, i:end]

                s2 = s1.softmax(dim=-1).to(v.dtype)
                del s1
                first_op_done = True

                r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
                del s2
            break
        except model_management.OOM_EXCEPTION as e:
            if not first_op_done:
                model_management.soft_empty_cache(True)
                if not cleared_cache:
                    cleared_cache = True
                    print("out of memory error, emptying cache and trying again")
                    continue
                steps *= 2
                if steps > 64:
                    raise e
                print("out of memory error, increasing steps and trying again", steps)
            else:
                raise e

    del q, k, v

    r1 = (
        r1.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return r1

BROKEN_XFORMERS = False
try:
    x_vers = xformers.__version__
    #I think 0.0.23 is also broken (q with bs bigger than 65535 gives CUDA error)
    BROKEN_XFORMERS = x_vers.startswith("0.0.21") or x_vers.startswith("0.0.22") or x_vers.startswith("0.0.23")
except:
    pass

def attention_xformers(q, k, v, heads, mask=None):
    b, _, dim_head = q.shape
    dim_head //= heads
    if BROKEN_XFORMERS:
        if b * heads > 65535:
            return attention_pytorch(q, k, v, heads, mask)

    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )
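
    # The attention bias buffer is allocated with its last dimension padded up to a
    # multiple of 8 (for xformers' alignment expectations) and the original-width view
    # of that buffer is what gets passed to memory_efficient_attention.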

    if mask is not None:
        pad = 8 - q.shape[1] % 8
        mask_out = torch.empty([q.shape[0], q.shape[1], q.shape[1] + pad], dtype=q.dtype, device=q.device)
        mask_out[:, :, :mask.shape[-1]] = mask
        mask = mask_out[:, :, :mask.shape[-1]]

    out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask)

    out = (
        out.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return out
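
# attention_pytorch relies on torch.nn.functional.scaled_dot_product_attention, which
# dispatches to flash / memory-efficient kernels when they are available; q, k and v are
# viewed as (batch, heads, tokens, dim_head) and the output is merged back afterwards.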

def attention_pytorch(q, k, v, heads, mask=None):
    b, _, dim_head = q.shape
    dim_head //= heads
    q, k, v = map(
        lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2),
        (q, k, v),
    )

    out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
    out = (
        out.transpose(1, 2).reshape(b, -1, heads * dim_head)
    )
    return out
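
# The default attention implementation is selected once at import time, based on the
# command line arguments and what model_management reports as available.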


optimized_attention = attention_basic

if model_management.xformers_enabled():
    print("Using xformers cross attention")
    optimized_attention = attention_xformers
elif model_management.pytorch_attention_enabled():
    print("Using pytorch cross attention")
    optimized_attention = attention_pytorch
else:
    if args.use_split_cross_attention:
        print("Using split optimization for cross attention")
        optimized_attention = attention_split
    else:
        print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
        optimized_attention = attention_sub_quad

optimized_attention_masked = optimized_attention

def optimized_attention_for_device(device, mask=False, small_input=False):
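    # Pick an attention implementation for a given device and input profile: small inputs
    # prefer the pytorch or basic kernels, the CPU path uses the sub-quadratic kernel, and
    # masked attention uses the masked variant of the globally selected implementation.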
    if small_input:
        if model_management.pytorch_attention_enabled():
            return attention_pytorch #TODO: need to confirm but this is probably slightly faster for small inputs in all cases
        else:
            return attention_basic

    if device == torch.device("cpu"):
        return attention_sub_quad

    if mask:
        return optimized_attention_masked

    return optimized_attention


class CrossAttention(nn.Module):
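    # Projects x to queries and the context (defaults to x itself, i.e. self-attention)
    # to keys/values, optionally taking a separate value tensor, runs the globally
    # selected attention kernel, and projects the result back to query_dim.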
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=ops):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout))

    def forward(self, x, context=None, value=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        if mask is None:
            out = optimized_attention(q, k, v, self.heads)
        else:
            out = optimized_attention_masked(q, k, v, self.heads, mask)
        return self.to_out(out)


class BasicTransformerBlock(nn.Module):
    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, ff_in=False, inner_dim=None,
                 disable_self_attn=False, disable_temporal_crossattention=False, switch_temporal_ca_to_sa=False, dtype=None, device=None, operations=ops):
        super().__init__()

        self.ff_in = ff_in or inner_dim is not None
        if inner_dim is None:
            inner_dim = dim

        self.is_res = inner_dim == dim

        if self.ff_in:
            self.norm_in = operations.LayerNorm(dim, dtype=dtype, device=device)
            self.ff_in = FeedForward(dim, dim_out=inner_dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations)

        self.disable_self_attn = disable_self_attn
        self.attn1 = CrossAttention(query_dim=inner_dim, heads=n_heads, dim_head=d_head, dropout=dropout,
                              context_dim=context_dim if self.disable_self_attn else None, dtype=dtype, device=device, operations=operations)  # is a self-attention if not self.disable_self_attn
        self.ff = FeedForward(inner_dim, dim_out=dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations)

        if disable_temporal_crossattention:
            if switch_temporal_ca_to_sa:
                raise ValueError
            else:
                self.attn2 = None
        else:
            context_dim_attn2 = None
            if not switch_temporal_ca_to_sa:
                context_dim_attn2 = context_dim

            self.attn2 = CrossAttention(query_dim=inner_dim, context_dim=context_dim_attn2,
                                heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device, operations=operations)  # is self-attn if context is none
            self.norm2 = operations.LayerNorm(inner_dim, dtype=dtype, device=device)

        self.norm1 = operations.LayerNorm(inner_dim, dtype=dtype, device=device)
        self.norm3 = operations.LayerNorm(inner_dim, dtype=dtype, device=device)
        self.checkpoint = checkpoint
        self.n_heads = n_heads
        self.d_head = d_head
        self.switch_temporal_ca_to_sa = switch_temporal_ca_to_sa

    def forward(self, x, context=None, transformer_options={}):
        return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)

    def _forward(self, x, context=None, transformer_options={}):
        extra_options = {}
        block = transformer_options.get("block", None)
        block_index = transformer_options.get("block_index", 0)
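
        # transformer_options carries optional hooks: "patches" maps hook names
        # (attn1_patch, attn1_output_patch, middle_patch, attn2_patch, attn2_output_patch)
        # to lists of callables applied at those points, "patches_replace" can replace an
        # entire attention call for a given block, and everything else is forwarded to the
        # patches through extra_options.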
        transformer_patches = {}
        transformer_patches_replace = {}

        for k in transformer_options:
            if k == "patches":
                transformer_patches = transformer_options[k]
            elif k == "patches_replace":
                transformer_patches_replace = transformer_options[k]
            else:
                extra_options[k] = transformer_options[k]

        extra_options["n_heads"] = self.n_heads
        extra_options["dim_head"] = self.d_head

        if self.ff_in:
            x_skip = x
            x = self.ff_in(self.norm_in(x))
            if self.is_res:
                x += x_skip

        n = self.norm1(x)
        if self.disable_self_attn:
            context_attn1 = context
        else:
            context_attn1 = None
        value_attn1 = None

        if "attn1_patch" in transformer_patches:
            patch = transformer_patches["attn1_patch"]
            if context_attn1 is None:
                context_attn1 = n
            value_attn1 = context_attn1
            for p in patch:
                n, context_attn1, value_attn1 = p(n, context_attn1, value_attn1, extra_options)

        if block is not None:
            transformer_block = (block[0], block[1], block_index)
        else:
            transformer_block = None
        attn1_replace_patch = transformer_patches_replace.get("attn1", {})
        block_attn1 = transformer_block
        if block_attn1 not in attn1_replace_patch:
            block_attn1 = block

        if block_attn1 in attn1_replace_patch:
            if context_attn1 is None:
                context_attn1 = n
                value_attn1 = n
            n = self.attn1.to_q(n)
            context_attn1 = self.attn1.to_k(context_attn1)
            value_attn1 = self.attn1.to_v(value_attn1)
            n = attn1_replace_patch[block_attn1](n, context_attn1, value_attn1, extra_options)
            n = self.attn1.to_out(n)
        else:
            n = self.attn1(n, context=context_attn1, value=value_attn1)

        if "attn1_output_patch" in transformer_patches:
            patch = transformer_patches["attn1_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        if "middle_patch" in transformer_patches:
            patch = transformer_patches["middle_patch"]
            for p in patch:
                x = p(x, extra_options)

        if self.attn2 is not None:
            n = self.norm2(x)
            if self.switch_temporal_ca_to_sa:
                context_attn2 = n
            else:
                context_attn2 = context
            value_attn2 = None
            if "attn2_patch" in transformer_patches:
                patch = transformer_patches["attn2_patch"]
                value_attn2 = context_attn2
                for p in patch:
                    n, context_attn2, value_attn2 = p(n, context_attn2, value_attn2, extra_options)

            attn2_replace_patch = transformer_patches_replace.get("attn2", {})
            block_attn2 = transformer_block
            if block_attn2 not in attn2_replace_patch:
                block_attn2 = block

            if block_attn2 in attn2_replace_patch:
                if value_attn2 is None:
                    value_attn2 = context_attn2
                n = self.attn2.to_q(n)
                context_attn2 = self.attn2.to_k(context_attn2)
                value_attn2 = self.attn2.to_v(value_attn2)
                n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options)
                n = self.attn2.to_out(n)
            else:
                n = self.attn2(n, context=context_attn2, value=value_attn2)

        if "attn2_output_patch" in transformer_patches:
            patch = transformer_patches["attn2_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        if self.is_res:
            x_skip = x
        x = self.ff(self.norm3(x))
        if self.is_res:
            x += x_skip

        return x


class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding)
    and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape to image
    NEW: use_linear for more efficiency instead of the 1x1 convs
    """
    def __init__(self, in_channels, n_heads, d_head,
                 depth=1, dropout=0., context_dim=None,
                 disable_self_attn=False, use_linear=False,
                 use_checkpoint=True, dtype=None, device=None, operations=ops):
        super().__init__()
        if exists(context_dim) and not isinstance(context_dim, list):
            context_dim = [context_dim] * depth
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = operations.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)
        if not use_linear:
            self.proj_in = operations.Conv2d(in_channels,
                                     inner_dim,
                                     kernel_size=1,
                                     stride=1,
                                     padding=0, dtype=dtype, device=device)
        else:
            self.proj_in = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device)

        self.transformer_blocks = nn.ModuleList(
            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
                                   disable_self_attn=disable_self_attn, checkpoint=use_checkpoint, dtype=dtype, device=device, operations=operations)
                for d in range(depth)]
        )
        if not use_linear:
            self.proj_out = operations.Conv2d(inner_dim, in_channels,
                                                  kernel_size=1,
                                                  stride=1,
                                                  padding=0, dtype=dtype, device=device)
        else:
            self.proj_out = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device)
        self.use_linear = use_linear

    def forward(self, x, context=None, transformer_options={}):
        # note: if no context is given, cross-attention defaults to self-attention
        if not isinstance(context, list):
            context = [context] * len(self.transformer_blocks)
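
        # Flatten the spatial dimensions into a token axis, run the transformer blocks,
        # then reshape back to an image and add the residual connection.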
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            transformer_options["block_index"] = i
            x = block(x, context=context[i], transformer_options=transformer_options)
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in


class SpatialVideoTransformer(SpatialTransformer):
    def __init__(
        self,
        in_channels,
        n_heads,
        d_head,
        depth=1,
        dropout=0.0,
        use_linear=False,
        context_dim=None,
        use_spatial_context=False,
        timesteps=None,
        merge_strategy: str = "fixed",
        merge_factor: float = 0.5,
        time_context_dim=None,
        ff_in=False,
        checkpoint=False,
        time_depth=1,
        disable_self_attn=False,
        disable_temporal_crossattention=False,
        max_time_embed_period: int = 10000,
        dtype=None, device=None, operations=ops
    ):
        super().__init__(
            in_channels,
            n_heads,
            d_head,
            depth=depth,
            dropout=dropout,
            use_checkpoint=checkpoint,
            context_dim=context_dim,
            use_linear=use_linear,
            disable_self_attn=disable_self_attn,
            dtype=dtype, device=device, operations=operations
        )
        self.time_depth = time_depth
        self.depth = depth
        self.max_time_embed_period = max_time_embed_period

        time_mix_d_head = d_head
        n_time_mix_heads = n_heads

        time_mix_inner_dim = int(time_mix_d_head * n_time_mix_heads)

        inner_dim = n_heads * d_head
        if use_spatial_context:
            time_context_dim = context_dim

        self.time_stack = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    n_time_mix_heads,
                    time_mix_d_head,
                    dropout=dropout,
                    context_dim=time_context_dim,
                    # timesteps=timesteps,
                    checkpoint=checkpoint,
                    ff_in=ff_in,
                    inner_dim=time_mix_inner_dim,
                    disable_self_attn=disable_self_attn,
                    disable_temporal_crossattention=disable_temporal_crossattention,
                    dtype=dtype, device=device, operations=operations
                )
                for _ in range(self.depth)
            ]
        )

        assert len(self.time_stack) == len(self.transformer_blocks)

        self.use_spatial_context = use_spatial_context
        self.in_channels = in_channels

        time_embed_dim = self.in_channels * 4
        self.time_pos_embed = nn.Sequential(
            operations.Linear(self.in_channels, time_embed_dim, dtype=dtype, device=device),
            nn.SiLU(),
            operations.Linear(time_embed_dim, self.in_channels, dtype=dtype, device=device),
        )

        self.time_mixer = AlphaBlender(
            alpha=merge_factor, merge_strategy=merge_strategy
        )

    def forward(
        self,
        x: torch.Tensor,
        context: Optional[torch.Tensor] = None,
        time_context: Optional[torch.Tensor] = None,
        timesteps: Optional[int] = None,
        image_only_indicator: Optional[torch.Tensor] = None,
        transformer_options={}
    ) -> torch.Tensor:
        _, _, h, w = x.shape
        x_in = x
        spatial_context = None
        if exists(context):
            spatial_context = context

        if self.use_spatial_context:
            assert (
                context.ndim == 3
            ), f"n dims of spatial context should be 3 but are {context.ndim}"

            if time_context is None:
                time_context = context
            time_context_first_timestep = time_context[::timesteps]
            time_context = repeat(
                time_context_first_timestep, "b ... -> (b n) ...", n=h * w
            )
        elif time_context is not None and not self.use_spatial_context:
            time_context = repeat(time_context, "b ... -> (b n) ...", n=h * w)
            if time_context.ndim == 2:
                time_context = rearrange(time_context, "b c -> b 1 c")

        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, "b c h w -> b (h w) c")
        if self.use_linear:
            x = self.proj_in(x)

        num_frames = torch.arange(timesteps, device=x.device)
        num_frames = repeat(num_frames, "t -> b t", b=x.shape[0] // timesteps)
        num_frames = rearrange(num_frames, "b t -> (b t)")
        t_emb = timestep_embedding(num_frames, self.in_channels, repeat_only=False, max_period=self.max_time_embed_period).to(x.dtype)
        emb = self.time_pos_embed(t_emb)
        emb = emb[:, None, :]
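
        # Each spatial block is followed by its temporal counterpart: tokens are rearranged
        # from (b*t, h*w, c) to (b*h*w, t, c) so the time_stack block attends across frames
        # at each spatial position, and AlphaBlender merges the spatial and temporal branches
        # according to merge_strategy / merge_factor.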

        for it_, (block, mix_block) in enumerate(
            zip(self.transformer_blocks, self.time_stack)
        ):
            transformer_options["block_index"] = it_
            x = block(
                x,
                context=spatial_context,
                transformer_options=transformer_options,
            )

            x_mix = x
            x_mix = x_mix + emb

            B, S, C = x_mix.shape
            x_mix = rearrange(x_mix, "(b t) s c -> (b s) t c", t=timesteps)
            x_mix = mix_block(x_mix, context=time_context) #TODO: transformer_options
            x_mix = rearrange(
                x_mix, "(b s) t c -> (b t) s c", s=S, b=B // timesteps, c=C, t=timesteps
            )

            x = self.time_mixer(x_spatial=x, x_temporal=x_mix, image_only_indicator=image_only_indicator)

        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w)
        if not self.use_linear:
            x = self.proj_out(x)
        out = x + x_in
        return out