from inspect import isfunction
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from typing import Optional, Any
from functools import partial


from .diffusionmodules.util import checkpoint, AlphaBlender, timestep_embedding
from .sub_quadratic_attention import efficient_dot_product_attention

from comfy import model_management

if model_management.xformers_enabled():
    import xformers
    import xformers.ops

from comfy.cli_args import args
import comfy.ops

# CrossAttn precision handling
if args.dont_upcast_attention:
    print("disabling upcasting of attention")
    _ATTN_PRECISION = "fp16"
else:
    _ATTN_PRECISION = "fp32"

def exists(val):
    return val is not None


def uniq(arr):
    return {el: True for el in arr}.keys()


def default(val, d):
    if exists(val):
        return val
    return d


def max_neg_value(t):
    return -torch.finfo(t.dtype).max


def init_(tensor):
    dim = tensor.shape[-1]
    std = 1 / math.sqrt(dim)
    tensor.uniform_(-std, std)
    return tensor


# feedforward
class GEGLU(nn.Module):
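    """Gated GELU (GEGLU) projection: the input is projected to 2 * dim_out,
    split in half, and one half gates the other through GELU."""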
    def __init__(self, dim_in, dim_out, dtype=None, device=None, operations=comfy.ops):
        super().__init__()
        self.proj = operations.Linear(dim_in, dim_out * 2, dtype=dtype, device=device)

    def forward(self, x):
        x, gate = self.proj(x).chunk(2, dim=-1)
        return x * F.gelu(gate)


class FeedForward(nn.Module):
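    """Transformer feed-forward block: project to inner_dim = dim * mult (plain GELU,
    or GEGLU when glu=True), apply dropout, then project back to dim_out."""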
    def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0., dtype=None, device=None, operations=comfy.ops):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = default(dim_out, dim)
        project_in = nn.Sequential(
            operations.Linear(dim, inner_dim, dtype=dtype, device=device),
            nn.GELU()
        ) if not glu else GEGLU(dim, inner_dim, dtype=dtype, device=device, operations=operations)

        self.net = nn.Sequential(
            project_in,
            nn.Dropout(dropout),
            operations.Linear(inner_dim, dim_out, dtype=dtype, device=device)
        )

    def forward(self, x):
        return self.net(x)


def zero_module(module):
    """
    Zero out the parameters of a module and return it.
    """
    for p in module.parameters():
        p.detach().zero_()
    return module


def Normalize(in_channels, dtype=None, device=None):
    return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True, dtype=dtype, device=device)

def attention_basic(q, k, v, heads, mask=None):
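    """Reference multi-head attention computed with explicit einsums.

    q, k and v are expected as (batch, tokens, heads * dim_head) and the output
    uses the same layout. The similarity matrix is optionally computed in fp32
    (see _ATTN_PRECISION) to avoid fp16 overflow.
    """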
    b, _, dim_head = q.shape
    dim_head //= heads
    scale = dim_head ** -0.5

    h = heads
    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    # force cast to fp32 to avoid overflowing
    if _ATTN_PRECISION == "fp32":
        with torch.autocast(enabled=False, device_type='cuda'):
            q, k = q.float(), k.float()
            sim = einsum('b i d, b j d -> b i j', q, k) * scale
    else:
        sim = einsum('b i d, b j d -> b i j', q, k) * scale

    del q, k

    if exists(mask):
        mask = rearrange(mask, 'b ... -> b (...)')
        max_neg_value = -torch.finfo(sim.dtype).max
        mask = repeat(mask, 'b j -> (b h) () j', h=h)
        sim.masked_fill_(~mask, max_neg_value)

    # attention, what we cannot get enough of
    sim = sim.softmax(dim=-1)

    out = einsum('b i j, b j d -> b i d', sim.to(v.dtype), v)
    out = (
        out.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return out

def attention_sub_quad(query, key, value, heads, mask=None):
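    """Sub-quadratic (chunked) attention built on efficient_dot_product_attention.

    Query and key/value chunk sizes are chosen based on the currently free device
    memory so the full attention matrix never has to be materialized at once.
    Same (batch, tokens, heads * dim_head) in/out layout as attention_basic.
    """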
    b, _, dim_head = query.shape
    dim_head //= heads

    scale = dim_head ** -0.5
    query = query.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)
    value = value.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 1, 3).reshape(b * heads, -1, dim_head)

    key = key.unsqueeze(3).reshape(b, -1, heads, dim_head).permute(0, 2, 3, 1).reshape(b * heads, dim_head, -1)

    dtype = query.dtype
    upcast_attention = _ATTN_PRECISION == "fp32" and query.dtype != torch.float32
    if upcast_attention:
        bytes_per_token = torch.finfo(torch.float32).bits//8
    else:
        bytes_per_token = torch.finfo(query.dtype).bits//8
    batch_x_heads, q_tokens, _ = query.shape
    _, _, k_tokens = key.shape
    qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens

    mem_free_total, mem_free_torch = model_management.get_free_memory(query.device, True)

    kv_chunk_size_min = None
    kv_chunk_size = None
    query_chunk_size = None

    for x in [4096, 2048, 1024, 512, 256]:
        count = mem_free_total / (batch_x_heads * bytes_per_token * x * 4.0)
        if count >= k_tokens:
            kv_chunk_size = k_tokens
            query_chunk_size = x
            break

    if query_chunk_size is None:
        query_chunk_size = 512

    hidden_states = efficient_dot_product_attention(
        query,
        key,
        value,
        query_chunk_size=query_chunk_size,
        kv_chunk_size=kv_chunk_size,
        kv_chunk_size_min=kv_chunk_size_min,
        use_checkpoint=False,
        upcast_attention=upcast_attention,
    )

    hidden_states = hidden_states.to(dtype)

    hidden_states = hidden_states.unflatten(0, (-1, heads)).transpose(1,2).flatten(start_dim=2)
    return hidden_states

def attention_split(q, k, v, heads, mask=None):
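    """Attention that slices the query into chunks sized to fit free memory.

    softmax(q_slice @ k^T) @ v is computed one slice at a time; on an
    out-of-memory error the cache is emptied and, if that is not enough,
    the number of slices is doubled. Same (batch, tokens, heads * dim_head)
    in/out layout as attention_basic.
    """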
    b, _, dim_head = q.shape
    dim_head //= heads
    scale = dim_head ** -0.5

    h = heads
    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)

    mem_free_total = model_management.get_free_memory(q.device)

    if _ATTN_PRECISION == "fp32":
        element_size = 4
    else:
        element_size = q.element_size()

    gb = 1024 ** 3
    tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * element_size
    modifier = 3
    mem_required = tensor_size * modifier
    steps = 1


    if mem_required > mem_free_total:
        steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2)))
        # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
        #      f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")

    if steps > 64:
        max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
        raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
                            f'Need: {mem_required/64/gb:0.1f}GB free, Have:{mem_free_total/gb:0.1f}GB free')

    # print("steps", steps, mem_required, mem_free_total, modifier, q.element_size(), tensor_size)
    first_op_done = False
    cleared_cache = False
    while True:
        try:
            slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
            for i in range(0, q.shape[1], slice_size):
                end = i + slice_size
                if _ATTN_PRECISION == "fp32":
                    with torch.autocast(enabled=False, device_type='cuda'):
                        s1 = einsum('b i d, b j d -> b i j', q[:, i:end].float(), k.float()) * scale
                else:
                    s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) * scale

                s2 = s1.softmax(dim=-1).to(v.dtype)
                del s1
                first_op_done = True

                r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
                del s2
            break
        except model_management.OOM_EXCEPTION as e:
            if first_op_done == False:
                model_management.soft_empty_cache(True)
                if cleared_cache == False:
                    cleared_cache = True
                    print("out of memory error, emptying cache and trying again")
                    continue
                steps *= 2
                if steps > 64:
                    raise e
                print("out of memory error, increasing steps and trying again", steps)
            else:
                raise e

    del q, k, v

    r1 = (
        r1.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return r1

BROKEN_XFORMERS = False
try:
    x_vers = xformers.__version__
    #I think 0.0.23 is also broken (q with bs bigger than 65535 gives CUDA error)
    BROKEN_XFORMERS = x_vers.startswith("0.0.21") or x_vers.startswith("0.0.22") or x_vers.startswith("0.0.23")
except:
    pass

def attention_xformers(q, k, v, heads, mask=None):
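    """Memory-efficient attention via xformers.ops.memory_efficient_attention.

    Falls back to attention_pytorch for xformers versions known to fail when
    batch * heads exceeds 65535. Masks are not supported here.
    """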
    b, _, dim_head = q.shape
    dim_head //= heads
    if BROKEN_XFORMERS:
        if b * heads > 65535:
            return attention_pytorch(q, k, v, heads, mask)

    q, k, v = map(
        lambda t: t.unsqueeze(3)
        .reshape(b, -1, heads, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b * heads, -1, dim_head)
        .contiguous(),
        (q, k, v),
    )

    # actually compute the attention, what we cannot get enough of
    out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)

    if exists(mask):
        raise NotImplementedError
    out = (
        out.unsqueeze(0)
        .reshape(b, heads, -1, dim_head)
        .permute(0, 2, 1, 3)
        .reshape(b, -1, heads * dim_head)
    )
    return out

def attention_pytorch(q, k, v, heads, mask=None):
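    """Attention via torch.nn.functional.scaled_dot_product_attention.

    Tensors are reshaped to (batch, heads, tokens, dim_head) so PyTorch can
    dispatch to its fused kernels where available, then reshaped back to
    (batch, tokens, heads * dim_head).
    """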
    b, _, dim_head = q.shape
    dim_head //= heads
    q, k, v = map(
        lambda t: t.view(b, -1, heads, dim_head).transpose(1, 2),
        (q, k, v),
    )

    out = torch.nn.functional.scaled_dot_product_attention(q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False)
    out = (
        out.transpose(1, 2).reshape(b, -1, heads * dim_head)
    )
    return out


optimized_attention = attention_basic
optimized_attention_masked = attention_basic
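# Pick one attention implementation at import time based on what the environment
# supports. All implementations share the same call signature:
#   out = optimized_attention(q, k, v, heads)  # q, k, v: (batch, tokens, heads * dim_head)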

if model_management.xformers_enabled():
    print("Using xformers cross attention")
    optimized_attention = attention_xformers
elif model_management.pytorch_attention_enabled():
    print("Using pytorch cross attention")
    optimized_attention = attention_pytorch
else:
    if args.use_split_cross_attention:
        print("Using split optimization for cross attention")
        optimized_attention = attention_split
    else:
        print("Using sub quadratic optimization for cross attention, if you have memory or speed issues try using: --use-split-cross-attention")
        optimized_attention = attention_sub_quad

if model_management.pytorch_attention_enabled():
    optimized_attention_masked = attention_pytorch

class CrossAttention(nn.Module):
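    """Multi-head cross-attention layer.

    x is projected to queries and context (defaults to x, i.e. self-attention)
    to keys/values; the actual attention is computed by whichever
    optimized_attention implementation was selected above.
    """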
    def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0., dtype=None, device=None, operations=comfy.ops):
        super().__init__()
        inner_dim = dim_head * heads
        context_dim = default(context_dim, query_dim)

        self.heads = heads
        self.dim_head = dim_head

        self.to_q = operations.Linear(query_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_k = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)
        self.to_v = operations.Linear(context_dim, inner_dim, bias=False, dtype=dtype, device=device)

        self.to_out = nn.Sequential(operations.Linear(inner_dim, query_dim, dtype=dtype, device=device), nn.Dropout(dropout))

    def forward(self, x, context=None, value=None, mask=None):
        q = self.to_q(x)
        context = default(context, x)
        k = self.to_k(context)
        if value is not None:
            v = self.to_v(value)
            del value
        else:
            v = self.to_v(context)

        if mask is None:
            out = optimized_attention(q, k, v, self.heads)
        else:
            out = optimized_attention_masked(q, k, v, self.heads, mask)
        return self.to_out(out)


class BasicTransformerBlock(nn.Module):
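    """Standard transformer block: self-attention (attn1), cross-attention (attn2)
    and a feed-forward network, each preceded by LayerNorm and wrapped in a
    residual connection. _forward also applies the patches supplied through
    transformer_options (attn1/attn2/middle/output patch hooks).
    """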
    def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True, ff_in=False, inner_dim=None,
                 disable_self_attn=False, disable_temporal_crossattention=False, switch_temporal_ca_to_sa=False, dtype=None, device=None, operations=comfy.ops):
        super().__init__()

        self.ff_in = ff_in or inner_dim is not None
        if inner_dim is None:
            inner_dim = dim

        self.is_res = inner_dim == dim

        if self.ff_in:
            self.norm_in = nn.LayerNorm(dim, dtype=dtype, device=device)
            self.ff_in = FeedForward(dim, dim_out=inner_dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations)

        self.disable_self_attn = disable_self_attn
        self.attn1 = CrossAttention(query_dim=inner_dim, heads=n_heads, dim_head=d_head, dropout=dropout,
                              context_dim=context_dim if self.disable_self_attn else None, dtype=dtype, device=device, operations=operations)  # is a self-attention if not self.disable_self_attn
        self.ff = FeedForward(inner_dim, dim_out=dim, dropout=dropout, glu=gated_ff, dtype=dtype, device=device, operations=operations)

        if disable_temporal_crossattention:
            if switch_temporal_ca_to_sa:
                raise ValueError
            else:
                self.attn2 = None
        else:
            context_dim_attn2 = None
            if not switch_temporal_ca_to_sa:
                context_dim_attn2 = context_dim

            self.attn2 = CrossAttention(query_dim=inner_dim, context_dim=context_dim_attn2,
                                heads=n_heads, dim_head=d_head, dropout=dropout, dtype=dtype, device=device, operations=operations)  # is self-attn if context is none
            self.norm2 = nn.LayerNorm(inner_dim, dtype=dtype, device=device)

        self.norm1 = nn.LayerNorm(inner_dim, dtype=dtype, device=device)
        self.norm3 = nn.LayerNorm(inner_dim, dtype=dtype, device=device)
        self.checkpoint = checkpoint
        self.n_heads = n_heads
        self.d_head = d_head
        self.switch_temporal_ca_to_sa = switch_temporal_ca_to_sa

    def forward(self, x, context=None, transformer_options={}):
        return checkpoint(self._forward, (x, context, transformer_options), self.parameters(), self.checkpoint)

    def _forward(self, x, context=None, transformer_options={}):
        extra_options = {}
        block = transformer_options.get("block", None)
        block_index = transformer_options.get("block_index", 0)
        transformer_patches = {}
        transformer_patches_replace = {}

        for k in transformer_options:
            if k == "patches":
                transformer_patches = transformer_options[k]
            elif k == "patches_replace":
                transformer_patches_replace = transformer_options[k]
            else:
                extra_options[k] = transformer_options[k]

        extra_options["n_heads"] = self.n_heads
        extra_options["dim_head"] = self.d_head

        if self.ff_in:
            x_skip = x
            x = self.ff_in(self.norm_in(x))
            if self.is_res:
                x += x_skip

        n = self.norm1(x)
        if self.disable_self_attn:
            context_attn1 = context
        else:
            context_attn1 = None
        value_attn1 = None

        if "attn1_patch" in transformer_patches:
            patch = transformer_patches["attn1_patch"]
            if context_attn1 is None:
                context_attn1 = n
            value_attn1 = context_attn1
            for p in patch:
                n, context_attn1, value_attn1 = p(n, context_attn1, value_attn1, extra_options)

        if block is not None:
            transformer_block = (block[0], block[1], block_index)
        else:
            transformer_block = None
        attn1_replace_patch = transformer_patches_replace.get("attn1", {})
        block_attn1 = transformer_block
        if block_attn1 not in attn1_replace_patch:
            block_attn1 = block

        if block_attn1 in attn1_replace_patch:
            if context_attn1 is None:
                context_attn1 = n
                value_attn1 = n
            n = self.attn1.to_q(n)
            context_attn1 = self.attn1.to_k(context_attn1)
            value_attn1 = self.attn1.to_v(value_attn1)
            n = attn1_replace_patch[block_attn1](n, context_attn1, value_attn1, extra_options)
            n = self.attn1.to_out(n)
        else:
            n = self.attn1(n, context=context_attn1, value=value_attn1)

        if "attn1_output_patch" in transformer_patches:
            patch = transformer_patches["attn1_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        if "middle_patch" in transformer_patches:
            patch = transformer_patches["middle_patch"]
            for p in patch:
                x = p(x, extra_options)

        if self.attn2 is not None:
            n = self.norm2(x)
            if self.switch_temporal_ca_to_sa:
                context_attn2 = n
            else:
                context_attn2 = context
            value_attn2 = None
            if "attn2_patch" in transformer_patches:
                patch = transformer_patches["attn2_patch"]
                value_attn2 = context_attn2
                for p in patch:
                    n, context_attn2, value_attn2 = p(n, context_attn2, value_attn2, extra_options)

            attn2_replace_patch = transformer_patches_replace.get("attn2", {})
            block_attn2 = transformer_block
            if block_attn2 not in attn2_replace_patch:
                block_attn2 = block

            if block_attn2 in attn2_replace_patch:
                if value_attn2 is None:
                    value_attn2 = context_attn2
                n = self.attn2.to_q(n)
                context_attn2 = self.attn2.to_k(context_attn2)
                value_attn2 = self.attn2.to_v(value_attn2)
                n = attn2_replace_patch[block_attn2](n, context_attn2, value_attn2, extra_options)
                n = self.attn2.to_out(n)
            else:
                n = self.attn2(n, context=context_attn2, value=value_attn2)

        if "attn2_output_patch" in transformer_patches:
            patch = transformer_patches["attn2_output_patch"]
            for p in patch:
                n = p(n, extra_options)

        x += n
        if self.is_res:
            x_skip = x
        x = self.ff(self.norm3(x))
        if self.is_res:
            x += x_skip

        return x


class SpatialTransformer(nn.Module):
    """
    Transformer block for image-like data.
    First, the input (i.e. the feature map) is projected and reshaped to (b, t, d),
    then standard transformer blocks are applied,
    and finally the result is reshaped back to an image.
    NEW: use_linear replaces the 1x1 convs with linear layers for more efficiency.
    """
    def __init__(self, in_channels, n_heads, d_head,
                 depth=1, dropout=0., context_dim=None,
                 disable_self_attn=False, use_linear=False,
                 use_checkpoint=True, dtype=None, device=None, operations=comfy.ops):
        super().__init__()
        if exists(context_dim) and not isinstance(context_dim, list):
            context_dim = [context_dim] * depth
        self.in_channels = in_channels
        inner_dim = n_heads * d_head
        self.norm = Normalize(in_channels, dtype=dtype, device=device)
        if not use_linear:
            self.proj_in = operations.Conv2d(in_channels,
                                     inner_dim,
                                     kernel_size=1,
                                     stride=1,
                                     padding=0, dtype=dtype, device=device)
        else:
            self.proj_in = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device)

        self.transformer_blocks = nn.ModuleList(
            [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],
                                   disable_self_attn=disable_self_attn, checkpoint=use_checkpoint, dtype=dtype, device=device, operations=operations)
                for d in range(depth)]
        )
        if not use_linear:
            self.proj_out = operations.Conv2d(inner_dim, in_channels,
                                                  kernel_size=1,
                                                  stride=1,
                                                  padding=0, dtype=dtype, device=device)
        else:
            self.proj_out = operations.Linear(in_channels, inner_dim, dtype=dtype, device=device)
        self.use_linear = use_linear

    def forward(self, x, context=None, transformer_options={}):
        # note: if no context is given, cross-attention defaults to self-attention
        if not isinstance(context, list):
            context = [context] * len(self.transformer_blocks)
        b, c, h, w = x.shape
        x_in = x
        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, 'b c h w -> b (h w) c').contiguous()
        if self.use_linear:
            x = self.proj_in(x)
        for i, block in enumerate(self.transformer_blocks):
            transformer_options["block_index"] = i
            x = block(x, context=context[i], transformer_options=transformer_options)
        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()
        if not self.use_linear:
            x = self.proj_out(x)
        return x + x_in


class SpatialVideoTransformer(SpatialTransformer):
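    """SpatialTransformer variant for video: each spatial transformer block is
    paired with a temporal block (self.time_stack) that attends across frames,
    and the two results are merged per position by an AlphaBlender. Frames are
    distinguished via a learned embedding of the frame index (time_pos_embed).
    """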
    def __init__(
        self,
        in_channels,
        n_heads,
        d_head,
        depth=1,
        dropout=0.0,
        use_linear=False,
        context_dim=None,
        use_spatial_context=False,
        timesteps=None,
        merge_strategy: str = "fixed",
        merge_factor: float = 0.5,
        time_context_dim=None,
        ff_in=False,
        checkpoint=False,
        time_depth=1,
        disable_self_attn=False,
        disable_temporal_crossattention=False,
        max_time_embed_period: int = 10000,
        dtype=None, device=None, operations=comfy.ops
    ):
        super().__init__(
            in_channels,
            n_heads,
            d_head,
            depth=depth,
            dropout=dropout,
            use_checkpoint=checkpoint,
            context_dim=context_dim,
            use_linear=use_linear,
            disable_self_attn=disable_self_attn,
            dtype=dtype, device=device, operations=operations
        )
        self.time_depth = time_depth
        self.depth = depth
        self.max_time_embed_period = max_time_embed_period

        time_mix_d_head = d_head
        n_time_mix_heads = n_heads

        time_mix_inner_dim = int(time_mix_d_head * n_time_mix_heads)

        inner_dim = n_heads * d_head
        if use_spatial_context:
            time_context_dim = context_dim

        self.time_stack = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    n_time_mix_heads,
                    time_mix_d_head,
                    dropout=dropout,
                    context_dim=time_context_dim,
                    # timesteps=timesteps,
                    checkpoint=checkpoint,
                    ff_in=ff_in,
                    inner_dim=time_mix_inner_dim,
                    disable_self_attn=disable_self_attn,
                    disable_temporal_crossattention=disable_temporal_crossattention,
                    dtype=dtype, device=device, operations=operations
                )
                for _ in range(self.depth)
            ]
        )

        assert len(self.time_stack) == len(self.transformer_blocks)

        self.use_spatial_context = use_spatial_context
        self.in_channels = in_channels

        time_embed_dim = self.in_channels * 4
        self.time_pos_embed = nn.Sequential(
            operations.Linear(self.in_channels, time_embed_dim, dtype=dtype, device=device),
            nn.SiLU(),
            operations.Linear(time_embed_dim, self.in_channels, dtype=dtype, device=device),
        )

        self.time_mixer = AlphaBlender(
            alpha=merge_factor, merge_strategy=merge_strategy
        )

    def forward(
        self,
        x: torch.Tensor,
        context: Optional[torch.Tensor] = None,
        time_context: Optional[torch.Tensor] = None,
        timesteps: Optional[int] = None,
        image_only_indicator: Optional[torch.Tensor] = None,
        transformer_options={}
    ) -> torch.Tensor:
        _, _, h, w = x.shape
        x_in = x
        spatial_context = None
        if exists(context):
            spatial_context = context

        if self.use_spatial_context:
            assert (
                context.ndim == 3
            ), f"n dims of spatial context should be 3 but are {context.ndim}"

            if time_context is None:
                time_context = context
            time_context_first_timestep = time_context[::timesteps]
            time_context = repeat(
                time_context_first_timestep, "b ... -> (b n) ...", n=h * w
            )
        elif time_context is not None and not self.use_spatial_context:
            time_context = repeat(time_context, "b ... -> (b n) ...", n=h * w)
            if time_context.ndim == 2:
                time_context = rearrange(time_context, "b c -> b 1 c")

        x = self.norm(x)
        if not self.use_linear:
            x = self.proj_in(x)
        x = rearrange(x, "b c h w -> b (h w) c")
        if self.use_linear:
            x = self.proj_in(x)

        num_frames = torch.arange(timesteps, device=x.device)
        num_frames = repeat(num_frames, "t -> b t", b=x.shape[0] // timesteps)
        num_frames = rearrange(num_frames, "b t -> (b t)")
        t_emb = timestep_embedding(num_frames, self.in_channels, repeat_only=False, max_period=self.max_time_embed_period).to(x.dtype)
        emb = self.time_pos_embed(t_emb)
        emb = emb[:, None, :]

        for it_, (block, mix_block) in enumerate(
            zip(self.transformer_blocks, self.time_stack)
        ):
            transformer_options["block_index"] = it_
            x = block(
                x,
                context=spatial_context,
                transformer_options=transformer_options,
            )

            x_mix = x
            x_mix = x_mix + emb

            B, S, C = x_mix.shape
            x_mix = rearrange(x_mix, "(b t) s c -> (b s) t c", t=timesteps)
            x_mix = mix_block(x_mix, context=time_context) #TODO: transformer_options
            x_mix = rearrange(
                x_mix, "(b s) t c -> (b t) s c", s=S, b=B // timesteps, c=C, t=timesteps
            )

            x = self.time_mixer(x_spatial=x, x_temporal=x_mix, image_only_indicator=image_only_indicator)

        if self.use_linear:
            x = self.proj_out(x)
        x = rearrange(x, "b (h w) c -> b c h w", h=h, w=w)
        if not self.use_linear:
            x = self.proj_out(x)
        out = x + x_in
        return out