attention.py
from inspect import isfunction
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from typing import Optional, Any
from functools import partial

from .diffusionmodules.util import checkpoint, AlphaBlender, timestep_embedding
from .sub_quadratic_attention import efficient_dot_product_attention

from comfy import model_management

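# xformers is an optional dependency; only import it when model_management reports it enabled.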
if model_management.xformers_enabled():
    import xformers
    import xformers.ops

from comfy.cli_args import args
import comfy.ops
ops = comfy.ops.disable_weight_init

# CrossAttn precision handling
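# By default the q @ k^T matmul is upcast to fp32 to avoid overflow when the
# model runs in fp16; args.dont_upcast_attention keeps it in the model dtype.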
if args.dont_upcast_attention:
    print("disabling upcasting of attention")
    _ATTN_PRECISION = "fp16"
else:
    _ATTN_PRECISION = "fp32"


def exists(val):
    return val is not None


def uniq(arr):
    return {el: True for el in arr}.keys()


def default(val, d):
    if exists(val):
        return val
    return d


def max_neg_value(t):
    return -torch.finfo(t.dtype).max


def init_(tensor):
    dim = tensor.shape[-1]
    std = 1 / math.sqrt(dim)
    tensor.uniform_(-std, std)
    return tensor


# feedforward
class GEGLU(nn.Module):
    def __init__(self, dim_in, dim_out, dtype=None, device=None, operations=ops):
        super().__init__()
        self.proj = operations.Linear(dim_in, dim_out * 2, dtype=dtype, device=device)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None, operations=ops):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = nn.Sequential(
            operations.Linear(dim, inner_dim, dtype=dtype, device=device),
            nn.GELU()
        ) if not glu else GEGLU(dim, inner_dim, dtype=dtype, device=device, operations=operations)

        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            operations.Linear(inner_dim, dim_out, dtype=dtype, device=device)
        )

    def forward(self, x):
        return self.net(x)

def Normalize(in_channels, dtype=None, device=None):
    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)

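# attention_basic: reference implementation. Computes softmax(q @ k^T * scale) @ v
# with einsum and materializes the full (tokens x tokens) similarity matrix per
# head, so memory use grows quadratically with sequence length.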
def attention_basic(q, k, v, heads, mask=None):
    b, _, dim_head = q.shape
    dim_head //= heads
    scale = dim_head ** -0.5

    h = heads
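    # (b, tokens, heads * dim_head) -> (b * heads, tokens, dim_head)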
    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    # force cast to fp32 to avoid overflowing
    if _ATTN_PRECISION == "fp32":
        sim = einsum('b i d, b j d -> b i j', q.float(), k.float()) * scale
    else:
        sim = einsum('b i d, b j d -> b i j', q, k) * scale

    del q, k

    if exists(mask):
        if mask.dtype == torch.bool:
            mask = rearrange(mask, 'b ... -> b (...)') #TODO: check if this bool part matches pytorch attention
            max_neg_value = -torch.finfo(sim.dtype).max
            mask = repeat(mask, 'b j -> (b h) () j', h=h)
            sim.masked_fill_(~mask, max_neg_value)
        else:
            sim += mask

    # attention, what we cannot get enough of
    sim = sim.softmax(dim=-1)

    out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v)
    out = (
        out.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return out


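# attention_sub_quad: chunked attention built on efficient_dot_product_attention,
# trading extra compute for sub-quadratic peak memory. Chunk sizes are chosen
# below from the currently free device memory.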
def attention_sub_quad(query, key, value, heads, mask=None):
    b, _, dim_head = query.shape
    dim_head //= heads

    scale = dim_head ** -0.5
    query = query.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)
    value = value.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)

    key = key.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 3, 1).reshape(b * heads, dim_head, -1)

    dtype = query.dtype
    upcast_attention = _ATTN_PRECISION == "fp32" and query.dtype != torch.float32
    if upcast_attention:
        bytes_per_token = torch.finfo(torch.float32).bits//8
    else:
        bytes_per_token = torch.finfo(query.dtype).bits//8
    batch_x_heads, q_tokens, _ = query.shape
    _, _, k_tokens = key.shape
    qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens

    mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)

    kv_chunk_size_min = None
    kv_chunk_size = None
    query_chunk_size = None

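    # Find the largest query chunk size for which the q @ k^T slice against all
    # key tokens fits in free memory; if none fits, fall back to 512-token query
    # chunks and let the kernel chunk the keys as well.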
    for x in [4096, 2048, 1024, 512, 256]:
        count = mem_free_total / (batch_x_heads * bytes_per_token * x * 4.0)
        if count >= k_tokens:
            kv_chunk_size = k_tokens
            query_chunk_size = x
            break

    if query_chunk_size is None:
        query_chunk_size = 512

    hidden_states = efficient_dot_product_attention(
        query,
        key,
        value,
        query_chunk_size=query_chunk_size,
        kv_chunk_size=kv_chunk_size,
        kv_chunk_size_min=kv_chunk_size_min,
        use_checkpoint=False,
        upcast_attention=upcast_attention,
        mask=mask,
    )

    hidden_states = hidden_states.to(dtype)

    hidden_states = hidden_states.unflatten(0, (-1, heads)).transpose(1,2).flatten(start_dim=2)
    return hidden_states

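# attention_split: same math as attention_basic, but processes the query tokens
# in slices sized from free memory, doubling the number of slices on OOM.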
def attention_split(q, k, v, heads, mask=None):
    b, _, dim_head = q.shape
    dim_head //= heads
    scale = dim_head ** -0.5

    h = heads
    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)

    mem_free_total = model_management.get_free_memory(q.device)

    if _ATTN_PRECISION == "fp32":
        element_size = 4
    else:
        element_size = q.element_size()

    gb = 1024 ** 3
    tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * element_size
    modifier = 3
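    # Estimate the memory needed for one full q @ k^T matrix (with headroom for
    # softmax temporaries) and derive how many query slices are required.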
    mem_required = tensor_size * modifier
    steps = 1


    if mem_required > mem_free_total:
        steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2)))
        # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
        #      f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")

    if steps > 64:
        max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
        raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
                            f'Need: {mem_required/64/gb:0.1f}GB free, Have:{mem_free_total/gb:0.1f}GB free')

    # print("steps", steps, mem_required, mem_free_total, modifier, q.element_size(), tensor_size)
    first_op_done = False
    cleared_cache = False
    while True:
        try:
            slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
            for i in range(0, q.shape[1], slice_size):
                end = i + slice_size
                if _ATTN_PRECISION == "fp32":
                    with torch.autocast(enabled=False, device_type='cuda'):
                        s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * scale
                else:
                    s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale

                if mask is not None:
                    if len(mask.shape) == 2:
                        s1 += mask[i:end]
                    else:
                        s1 += mask[:, i:end]

                s2 = s1.softmax(dim=-1).to(v.dtype)
                del s1
                first_op_done = True

                r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
                del s2
            break
        except model_management.OOM_EXCEPTION as e:
            if not first_op_done:
                model_management.soft_empty_cache(True)
                if not cleared_cache:
                    cleared_cache = True
                    print("out of memory error, emptying cache and trying again")
                    continue
                steps *= 2
                if steps > 64:
                    raise e
                print("out of memory error, increasing steps and trying again", steps)
            else:
                raise e

    del q, k, v

    r1 = (
        r1.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return r1

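# Some xformers releases (0.0.21 - 0.0.23) produce CUDA errors when
# batch * heads exceeds 65535; attention_xformers falls back to
# attention_pytorch in that case.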
BROKEN_XFORMERS = False
try:
    x_vers = xformers.__version__
    #I think 0.0.23 is also broken (q with bs bigger than 65535 gives CUDA error)
    BROKEN_XFORMERS = x_vers.startswith("0.0.21") or x_vers.startswith("0.0.22") or x_vers.startswith("0.0.23")
except:
    pass

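# attention_xformers: memory-efficient attention via xformers.ops.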
def attention_xformers(q, k, v, heads, mask=None):
    b, _, dim_head = q.shape
    dim_head //= heads
    if BROKEN_XFORMERS:
        if b * heads > 65535:
            return attention_pytorch(q, k, v, heads, mask)

    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

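    # xformers expects the attention bias to be backed by storage whose last
    # dimension is padded to a multiple of 8, so copy the mask into a padded
    # buffer and slice it back to its original width.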
    if mask is not None:
        pad = 8 - q.shape[1] % 8
        mask_out = torch.empty([q.shape[0], q.shape[1], q.shape[1] + pad], dtype=q.dtype, device=q.device)
        mask_out[:, :, :mask.shape[-1]] = mask
        mask = mask_out[:, :, :mask.shape[-1]]

    out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=mask)

    out = (
        out.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return out

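# attention_pytorch: thin wrapper around torch.nn.functional.scaled_dot_product_attention,
# which dispatches to flash / memory-efficient kernels where available.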
def attention_pytorch(q, k, v, heads, mask=None):
    b, _, dim_head = q.shape
    dim_head //= heads
    q, k, v = map(
        lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2),
        (q, k, v),
    )

    out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
    out = (
        out.transpose(1, 2).reshape(b, -1, heads * dim_head)
    )
    return out


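# Pick the attention implementation at import time based on what the
# environment supports; attention_basic is only the fallback default.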
optimized_attention = attention_basic
optimized_attention_masked = attention_basic

if model_management.xformers_enabled():
    print("Using xformers cross attention")
    optimized_attention = attention_xformers
elif model_management.pytorch_attention_enabled():
    print("Using pytorch cross attention")
    optimized_attention = attention_pytorch
else:
    if args.use_split_cross_attention:
        print("Using split optimization for cross attention")
        optimized_attention = attention_split
    else:
        print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
        optimized_attention = attention_sub_quad

if model_management.pytorch_attention_enabled():
    optimized_attention_masked = attention_pytorch

def optimized_attention_for_device(device, mask=False):
    if device == torch.device("cpu"): #TODO
        if model_management.pytorch_attention_enabled():
            return attention_pytorch
        else:
            return attention_basic
    if mask:
        return optimized_attention_masked

    return optimized_attention
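
# Usage sketch (illustrative, not called anywhere in this module): every
# attention_* function shares the same interface, e.g.
#   attn = optimized_attention_for_device(q.device)
#   out = attn(q, k, v, heads=8)   # q, k, v: (batch, tokens, heads * dim_head)
# and returns a tensor with the same (batch, tokens, heads * dim_head) layout.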


class CrossAttention(nn.Module):
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=ops):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout))

    def forward(self, x, context=None, value=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        if mask is None:
            out = optimized_attention(q, k, v, self.heads)
        else:
            out = optimized_attention_masked(q, k, v, self.heads, mask)
        return self.to_out(out)


class BasicTransformerBlock(nn.Module):
    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, ff_in=False, inner_dim=None,
                 disable_self_attn=False, disable_temporal_crossattention=False, switch_temporal_ca_to_sa=False, dtype=None, device=None, operations=ops):
        super().__init__()

        self.ff_in = ff_in or inner_dim is not None
        if inner_dim is None:
            inner_dim = dim

        self.is_res = inner_dim == dim

        if self.ff_in:
            self.norm_in = operations.LayerNorm(dim, dtype=dtype, device=device)
            self.ff_in = FeedForward(dim, dim_out=inner_dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations)

        self.disable_self_attn = disable_self_attn
        self.attn1 = CrossAttention(query_dim=inner_dim, heads=n_heads, dim_head=d_head, dropout=dropout,
                              context_dim=context_dim if self.disable_self_attn else None, dtype=dtype, device=device, operations=operations)  # is a self-attention if not self.disable_self_attn
        self.ff = FeedForward(inner_dim, dim_out=dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations)

        if disable_temporal_crossattention:
            if switch_temporal_ca_to_sa:
                raise ValueError
            else:
                self.attn2 = None
        else:
            context_dim_attn2 = None
            if not switch_temporal_ca_to_sa:
                context_dim_attn2 = context_dim

            self.attn2 = CrossAttention(query_dim=inner_dim, context_dim=context_dim_attn2,
                                heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device, operations=operations)  # is self-attn if context is none
            self.norm2 = operations.LayerNorm(inner_dim, dtype=dtype, device=device)

        self.norm1 = operations.LayerNorm(inner_dim, dtype=dtype, device=device)
        self.norm3 = operations.LayerNorm(inner_dim, dtype=dtype, device=device)
        self.checkpoint = checkpoint
        self.n_heads = n_heads
        self.d_head = d_head
        self.switch_temporal_ca_to_sa = switch_temporal_ca_to_sa

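    # forward only wraps _forward in the checkpoint() helper imported above;
    # the actual block logic lives in _forward.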
    def forward(self, x, context=None, transformer_options={}):
        return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)

    def _forward(self, x, context=None, transformer_options={}):
        extra_options = {}
        block = transformer_options.get("block", None)
        block_index = transformer_options.get("block_index", 0)
        transformer_patches = {}
        transformer_patches_replace = {}

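        # Split transformer_options into patch dicts ("patches", "patches_replace")
        # and everything else, which is forwarded to the patches via extra_options.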
        for k in transformer_options:
            if k == "patches":
                transformer_patches = transformer_options[k]
            elif k == "patches_replace":
                transformer_patches_replace = transformer_options[k]
            else:
                extra_options[k] = transformer_options[k]

        extra_options["n_heads"] = self.n_heads
        extra_options["dim_head"] = self.d_head

        if self.ff_in:
            x_skip = x
            x = self.ff_in(self.norm_in(x))
            if self.is_res:
                x += x_skip

        n = self.norm1(x)
        if self.disable_self_attn:
            context_attn1 = context
        else:
            context_attn1 = None
        value_attn1 = None

        if "attn1_patch" in transformer_patches:
            patch = transformer_patches["attn1_patch"]
            if context_attn1 is None:
                context_attn1 = n
            value_attn1 = context_attn1
            for p in patch:
                n, context_attn1, value_attn1 = p(n, context_attn1, value_attn1, extra_options)

        if block is not None:
            transformer_block = (block[0], block[1], block_index)
        else:
            transformer_block = None
        attn1_replace_patch = transformer_patches_replace.get("attn1", {})
        block_attn1 = transformer_block
        if block_attn1 not in attn1_replace_patch:
            block_attn1 = block

        if block_attn1 in attn1_replace_patch:
            if context_attn1 is None:
                context_attn1 = n
                value_attn1 = n
            n = self.attn1.to_q(n)
            context_attn1 = self.attn1.to_k(context_attn1)
            value_attn1 = self.attn1.to_v(value_attn1)
            n = attn1_replace_patch[block_attn1](n, context_attn1, value_attn1, extra_options)
            n = self.attn1.to_out(n)
        else:
            n = self.attn1(n, context=context_attn1, value=value_attn1)

        if "attn1_output_patch" in transformer_patches:
            patch = transformer_patches["attn1_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        if "middle_patch" in transformer_patches:
            patch = transformer_patches["middle_patch"]
            for p in patch:
                x = p(x, extra_options)

        if self.attn2 is not None:
            n = self.norm2(x)
            if self.switch_temporal_ca_to_sa:
                context_attn2 = n
            else:
                context_attn2 = context
            value_attn2 = None
            if "attn2_patch" in transformer_patches:
                patch = transformer_patches["attn2_patch"]
                value_attn2 = context_attn2
                for p in patch:
                    n, context_attn2, value_attn2 = p(n, context_attn2, value_attn2, extra_options)

            attn2_replace_patch = transformer_patches_replace.get("attn2", {})
            block_attn2 = transformer_block
            if block_attn2 not in attn2_replace_patch:
                block_attn2 = block

            if block_attn2 in attn2_replace_patch:
                if value_attn2 is None:
                    value_attn2 = context_attn2
                n = self.attn2.to_q(n)
                context_attn2 = self.attn2.to_k(context_attn2)
                value_attn2 = self.attn2.to_v(value_attn2)
                n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options)
                n = self.attn2.to_out(n)
            else:
                n = self.attn2(n, context=context_attn2, value=value_attn2)

        if "attn2_output_patch" in transformer_patches:
            patch = transformer_patches["attn2_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        if self.is_res:
            x_skip = x
        x = self.ff(self.norm3(x))
        if self.is_res:
            x += x_skip

        return x


class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, project the input (aka embedding)
    and reshape to b, t, d.
    Then apply standard transformer action.
    Finally, reshape to image
    NEW: use_linear for more efficiency instead of the 1x1 convs
    """
    def __init__(self, in_channels, n_heads, d_head,
                 depth=1, dropout=0., context_dim=None,
                 disable_self_attn=False, use_linear=False,
                 use_checkpoint=True, dtype=None, device=None, operations=ops):
        super().__init__()
        if exists(context_dim) and not isinstance(context_dim, list):
            context_dim = [context_dim] * depth
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = operations.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)
        if not use_linear:
            self.proj_in = operations.Conv2d(in_channels,
                                     inner_dim,
                                     kernel_size=1,
                                     stride=1,
                                     padding=0, dtype=dtype, device=device)
        else:
            self.proj_in = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device)

        self.transformer_blocks = nn.ModuleList(
            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
                                   disable_self_attn=disable_self_attn, checkpoint=use_checkpoint, dtype=dtype, device=device, operations=operations)
                for d in range(depth)]
        )
        if not use_linear:
            self.proj_out = operations.Conv2d(inner_dim,in_channels,
                                                  kernel_size=1,
                                                  stride=1,
                                                  padding=0, dtype=dtype, device=device)
        else:
            self.proj_out = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device)
        self.use_linear = use_linear

    def forward(self, x, context=None, transformer_options={}):
        # note: if no context is given, cross-attention defaults to self-attention
        if not isinstance(context, list):
            context = [context] * len(self.transformer_blocks)
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            transformer_options["block_index"] = i
            x = block(x, context=context[i], transformer_options=transformer_options)
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in


class SpatialVideoTransformer(SpatialTransformer):
    def __init__(
        self,
        in_channels,
        n_heads,
        d_head,
        depth=1,
        dropout=0.0,
        use_linear=False,
        context_dim=None,
        use_spatial_context=False,
        timesteps=None,
        merge_strategy: str = "fixed",
        merge_factor: float = 0.5,
        time_context_dim=None,
        ff_in=False,
        checkpoint=False,
        time_depth=1,
        disable_self_attn=False,
        disable_temporal_crossattention=False,
        max_time_embed_period: int = 10000,
        dtype=None, device=None, operations=ops
    ):
        super().__init__(
            in_channels,
            n_heads,
            d_head,
            depth=depth,
            dropout=dropout,
            use_checkpoint=checkpoint,
            context_dim=context_dim,
            use_linear=use_linear,
            disable_self_attn=disable_self_attn,
            dtype=dtype, device=device, operations=operations
        )
        self.time_depth = time_depth
        self.depth = depth
        self.max_time_embed_period = max_time_embed_period

        time_mix_d_head = d_head
        n_time_mix_heads = n_heads

        time_mix_inner_dim = int(time_mix_d_head * n_time_mix_heads)

        inner_dim = n_heads * d_head
        if use_spatial_context:
            time_context_dim = context_dim

        self.time_stack = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    n_time_mix_heads,
                    time_mix_d_head,
                    dropout=dropout,
                    context_dim=time_context_dim,
                    # timesteps=timesteps,
                    checkpoint=checkpoint,
                    ff_in=ff_in,
                    inner_dim=time_mix_inner_dim,
                    disable_self_attn=disable_self_attn,
                    disable_temporal_crossattention=disable_temporal_crossattention,
                    dtype=dtype, device=device, operations=operations
                )
                for _ in range(self.depth)
            ]
        )

        assert len(self.time_stack) == len(self.transformer_blocks)

        self.use_spatial_context = use_spatial_context
        self.in_channels = in_channels

        time_embed_dim = self.in_channels * 4
        self.time_pos_embed = nn.Sequential(
            operations.Linear(self.in_channels, time_embed_dim, dtype=dtype, device=device),
            nn.SiLU(),
            operations.Linear(time_embed_dim, self.in_channels, dtype=dtype, device=device),
        )

        self.time_mixer = AlphaBlender(
            alpha=merge_factor, merge_strategy=merge_strategy
        )

    def forward(
        self,
        x: torch.Tensor,
        context: Optional[torch.Tensor] = None,
        time_context: Optional[torch.Tensor] = None,
        timesteps: Optional[int] = None,
        image_only_indicator: Optional[torch.Tensor] = None,
        transformer_options={}
    ) -> torch.Tensor:
        _, _, h, w = x.shape
        x_in = x
        spatial_context = None
        if exists(context):
            spatial_context = context

        if self.use_spatial_context:
            assert (
                context.ndim == 3
            ), f"n dims of spatial context should be 3 but are {context.ndim}"

            if time_context is None:
                time_context = context
            time_context_first_timestep = time_context[::timesteps]
            time_context = repeat(
                time_context_first_timestep, "b ... -> (b n) ...", n=h * w
            )
        elif time_context is not None and not self.use_spatial_context:
            time_context = repeat(time_context, "b ... -> (b n) ...", n=h * w)
            if time_context.ndim == 2:
                time_context = rearrange(time_context, "b c -> b 1 c")

        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, "b c h w -> b (h w) c")
        if self.use_linear:
            x = self.proj_in(x)

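        # Embed each token's frame index; the embedding is added to the temporal
        # branch (x_mix) before the time-mixing blocks run.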
        num_frames = torch.arange(timesteps, device=x.device)
        num_frames = repeat(num_frames, "t -> b t", b=x.shape[0] // timesteps)
        num_frames = rearrange(num_frames, "b t -> (b t)")
        t_emb = timestep_embedding(num_frames, self.in_channels, repeat_only=False, max_period=self.max_time_embed_period).to(x.dtype)
        emb = self.time_pos_embed(t_emb)
        emb = emb[:, None, :]

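        # Interleave spatial and temporal attention: each spatial block is paired
        # with a time_stack block that attends over the frame axis, and the two
        # results are blended by the AlphaBlender time_mixer.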
        for it_, (block, mix_block) in enumerate(
            zip(self.transformer_blocks, self.time_stack)
        ):
            transformer_options["block_index"] = it_
            x = block(
                x,
                context=spatial_context,
                transformer_options=transformer_options,
            )

            x_mix = x
            x_mix = x_mix + emb

            B, S, C = x_mix.shape
            x_mix = rearrange(x_mix, "(b t) s c -> (b s) t c", t=timesteps)
            x_mix = mix_block(x_mix, context=time_context) #TODO: transformer_options
            x_mix = rearrange(
                x_mix, "(b s) t c -> (b t) s c", s=S, b=B // timesteps, c=C, t=timesteps
            )

            x = self.time_mixer(x_spatial=x, x_temporal=x_mix, image_only_indicator=image_only_indicator)

        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w)
        if not self.use_linear:
            x = self.proj_out(x)
        out = x + x_in
        return out